1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34 /**
35 * @file
36 * Defines and implements the Hardware Abstraction Layer (HW).
37 * All interaction with the hardware is performed through the HW, which abstracts
38 * the details of the underlying SLI-4 implementation.
39 */
40
41 /**
42 * @defgroup devInitShutdown Device Initialization and Shutdown
43 * @defgroup domain Domain Functions
44 * @defgroup port Port Functions
45 * @defgroup node Remote Node Functions
46 * @defgroup io IO Functions
47 * @defgroup interrupt Interrupt handling
48 * @defgroup os OS Required Functions
49 */
50
51 #include "ocs.h"
52 #include "ocs_os.h"
53 #include "ocs_hw.h"
54 #include "ocs_hw_queues.h"
55
56 #define OCS_HW_MQ_DEPTH 128
57 #define OCS_HW_READ_FCF_SIZE 4096
58 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256
59 #define OCS_HW_WQ_TIMER_PERIOD_MS 500
60
61 /* values used for setting the auto xfer rdy parameters */
62 #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */
63 #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE
64 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE
65 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0
66 #define OCS_HW_REQUE_XRI_REGTAG 65534
67 /* max command and response buffer lengths -- arbitrary at the moment */
68 #define OCS_HW_DMTF_CLP_CMD_MAX 256
69 #define OCS_HW_DMTF_CLP_RSP_MAX 256
70
71 /* HW global data */
72 ocs_hw_global_t hw_global;
73
74 static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
75 static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
76 static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
77 static int32_t ocs_hw_cb_link(void *, void *);
78 static int32_t ocs_hw_cb_fip(void *, void *);
79 static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
80 static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
81 static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
82 static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
83 static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
84 static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
85 static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
86 static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
87 static int32_t ocs_hw_flush(ocs_hw_t *);
88 static int32_t ocs_hw_command_cancel(ocs_hw_t *);
89 static int32_t ocs_hw_io_cancel(ocs_hw_t *);
90 static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
91 static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
92 static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
93 static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
94 static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
95 static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
96 static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
97 static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
98 static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
99 static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
100 static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
101 typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
102 static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
103 static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
104
105 static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
106 static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
107 static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
108 static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
109 static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
110 static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
111 static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
112 static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
113 static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
114 static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
115 static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
116 static void ocs_hw_io_free_internal(void *arg);
117 static void ocs_hw_io_free_port_owned(void *arg);
118 static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
119 static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
120 static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
121 static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
122 static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
123 static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
124
125 /* HW domain database operations */
126 static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
127 static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
128
129 /* Port state machine */
130 static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
131 static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
132 static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
133 static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
134 static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
135
136 /* Domain state machine */
137 static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
138 static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
139 static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
140 static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
141 static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
142 static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
143 static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
144 static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
145
146 /* BZ 161832 */
147 static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
148
149 /* WQE timeouts */
150 static void target_wqe_timer_cb(void *arg);
151 static void shutdown_target_wqe_timer(ocs_hw_t *hw);
152
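/*
 * Target WQE timeout emulation: when enabled, IOs with a target WQE
 * timeout are tracked on hw->io_timed_wqe (under io_lock) along with
 * their submit tick count, and the periodic target_wqe_timer_cb()
 * checks that list for expired entries.
 */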
153 static inline void
154 ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
155 {
156 if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
157 /*
158 * Active WQE list currently only used for
159 * target WQE timeouts.
160 */
161 ocs_lock(&hw->io_lock);
162 ocs_list_add_tail(&hw->io_timed_wqe, io);
163 io->submit_ticks = ocs_get_os_ticks();
164 ocs_unlock(&hw->io_lock);
165 }
166 }
167
168 static inline void
169 ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
170 {
171 if (hw->config.emulate_tgt_wqe_timeout) {
172 /*
173 * If target wqe timeouts are enabled,
174 * remove from active wqe list.
175 */
176 ocs_lock(&hw->io_lock);
177 if (ocs_list_on_list(&io->wqe_link)) {
178 ocs_list_remove(&hw->io_timed_wqe, io);
179 }
180 ocs_unlock(&hw->io_lock);
181 }
182 }
183
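/* Return 1 if the IO type originates the exchange (initiator I/O, CT and
 * ELS requests), otherwise 0.
 */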
184 static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
185 {
186 switch (io_type) {
187 case OCS_HW_IO_INITIATOR_READ:
188 case OCS_HW_IO_INITIATOR_WRITE:
189 case OCS_HW_IO_INITIATOR_NODATA:
190 case OCS_HW_FC_CT:
191 case OCS_HW_ELS_REQ:
192 return 1;
193 default:
194 return 0;
195 }
196 }
197
198 static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
199 {
200 /* if exchange not active, nothing to abort */
201 if (!xb) {
202 return FALSE;
203 }
204 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
205 switch (ext) {
206 /* exceptions where abort is not needed */
207 case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
208 case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
209 return FALSE;
210 default:
211 break;
212 }
213 }
214 return TRUE;
215 }
216
217 /**
218 * @brief Determine the number of chutes on the device.
219 *
220 * @par Description
221 * Some devices require queue resources allocated per protocol processor
222 * (chute). This function returns the number of chutes on this device.
223 *
224 * @param hw Hardware context allocated by the caller.
225 *
226 * @return Returns the number of chutes (protocol processors) on the device.
227 */
228 static uint32_t
229 ocs_hw_get_num_chutes(ocs_hw_t *hw)
230 {
231 uint32_t num_chutes = 1;
232
233 if (sli_get_is_dual_ulp_capable(&hw->sli) &&
234 sli_get_is_ulp_enabled(&hw->sli, 0) &&
235 sli_get_is_ulp_enabled(&hw->sli, 1)) {
236 num_chutes = 2;
237 }
238 return num_chutes;
239 }
240
241 static ocs_hw_rtn_e
242 ocs_hw_link_event_init(ocs_hw_t *hw)
243 {
244 ocs_hw_assert(hw);
245
246 hw->link.status = SLI_LINK_STATUS_MAX;
247 hw->link.topology = SLI_LINK_TOPO_NONE;
248 hw->link.medium = SLI_LINK_MEDIUM_MAX;
249 hw->link.speed = 0;
250 hw->link.loop_map = NULL;
251 hw->link.fc_id = UINT32_MAX;
252
253 return OCS_HW_RTN_SUCCESS;
254 }
255
256 /**
257 * @ingroup devInitShutdown
258 * @brief If this is physical port 0, then read the max dump size.
259 *
260 * @par Description
261 * Queries the FW for the maximum dump size
262 *
263 * @param hw Hardware context allocated by the caller.
264 *
265 * @return Returns 0 on success, or a non-zero value on failure.
266 */
267 static ocs_hw_rtn_e
268 ocs_hw_read_max_dump_size(ocs_hw_t *hw)
269 {
270 uint8_t buf[SLI4_BMBX_SIZE];
271 uint8_t bus, dev, func;
272 int rc;
273
274 /* lancer only */
275 if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) &&
276 (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) {
277 ocs_log_debug(hw->os, "Function only supported for Lancer (I/F type 2 or 6)\n");
278 return OCS_HW_RTN_ERROR;
279 }
280
281 /*
282 * Make sure the FW is new enough to support this command. If the FW
283 * is too old, the FW will UE.
284 */
285 if (hw->workaround.disable_dump_loc) {
286 ocs_log_test(hw->os, "FW version is too old for this feature\n");
287 return OCS_HW_RTN_ERROR;
288 }
289
290 /* attempt to determine the dump size for function 0 only. */
291 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
292 if (func == 0) {
293 if (sli_cmd_common_set_dump_location(&hw->sli, buf,
294 SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
295 sli4_res_common_set_dump_location_t *rsp =
296 (sli4_res_common_set_dump_location_t *)
297 (buf + offsetof(sli4_cmd_sli_config_t,
298 payload.embed));
299
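/* Note: rsp points into the embedded payload of buf; its fields are
 * valid only after the polled command below completes.
 */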
300 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
301 if (rc != OCS_HW_RTN_SUCCESS) {
302 ocs_log_test(hw->os, "set dump location command failed\n");
303 return rc;
304 } else {
305 hw->dump_size = rsp->buffer_length;
306 ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
307 }
308 }
309 }
310 return OCS_HW_RTN_SUCCESS;
311 }
312
313 /**
314 * @ingroup devInitShutdown
315 * @brief Set up the Hardware Abstraction Layer module.
316 *
317 * @par Description
318 * Calls set up to configure the hardware.
319 *
320 * @param hw Hardware context allocated by the caller.
321 * @param os Device abstraction.
322 * @param port_type Protocol type of port, such as FC and NIC.
323 *
324 * @todo Why is port_type a parameter?
325 *
326 * @return Returns 0 on success, or a non-zero value on failure.
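*
* @par Example
* A minimal call sketch; the ocs device pointer and the error handling
* shown here are illustrative assumptions:
* @code
* ocs_hw_t *hw = &ocs->hw;
* if (ocs_hw_setup(hw, ocs, SLI4_PORT_TYPE_FC) != OCS_HW_RTN_SUCCESS) {
*         ocs_log_err(ocs, "ocs_hw_setup failed\n");
* }
* @endcode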
327 */
328 ocs_hw_rtn_e
329 ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
330 {
331 uint32_t i;
332 char prop_buf[32];
333
334 if (hw == NULL) {
335 ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
336 return OCS_HW_RTN_ERROR;
337 }
338
339 if (hw->hw_setup_called) {
340 /* Set up the run-time workarounds. This is done on every
341 * call to ocs_hw_setup(), to allow for hw_war_version.
342 */
343 ocs_hw_workaround_setup(hw);
344 return OCS_HW_RTN_SUCCESS;
345 }
346
347 /*
348 * ocs_hw_init() relies on NULL pointers indicating that a structure
349 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
350 * free/realloc that memory.
351 */
352 ocs_memset(hw, 0, sizeof(ocs_hw_t));
353
354 hw->hw_setup_called = TRUE;
355
356 hw->os = os;
357
358 ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
359 ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
360 ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
361 hw->cmd_head_count = 0;
362
363 ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
364 ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
365
366 ocs_atomic_init(&hw->io_alloc_failed_count, 0);
367
368 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
369 hw->config.dif_seed = 0;
370 hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
371 hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
372 hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
373 hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
374
375 if (sli_setup(&hw->sli, hw->os, port_type)) {
376 ocs_log_err(hw->os, "SLI setup failed\n");
377 return OCS_HW_RTN_ERROR;
378 }
379
380 ocs_memset(hw->domains, 0, sizeof(hw->domains));
381
382 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
383
384 ocs_hw_link_event_init(hw);
385
386 sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
387 sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
388
389 /*
390 * Set all the queue sizes to the maximum allowed. These values may
391 * be changed later by the adjust and workaround functions.
392 */
393 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
394 hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
395 }
396
397 /*
398 * The RQ assignment for RQ pair mode.
399 */
400 hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
401 hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
402 if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
403 hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
404 }
405
406 /* by default, enable initiator-only auto-ABTS emulation */
407 hw->config.i_only_aab = TRUE;
408
409 /* Setup run-time workarounds */
410 ocs_hw_workaround_setup(hw);
411
412 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
413 if (hw->workaround.override_fcfi) {
414 hw->first_domain_idx = -1;
415 }
416
417 /* Must be done after the workaround setup */
418 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
419 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
420
421 (void)ocs_hw_read_max_dump_size(hw);
422 }
423
424 /* calculate the number of WQs required. */
425 ocs_hw_adjust_wqs(hw);
426
427 /* Set the default dif mode */
428 if (!sli_is_dif_inline_capable(&hw->sli)) {
429 ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
430 hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
431 }
432 /* Workaround: BZ 161832 */
433 if (hw->workaround.use_dif_sec_xri) {
434 ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
435 }
436
437 /*
438 * Figure out the starting and max ULP to spread the WQs across the
439 * ULPs.
440 */
441 if (sli_get_is_dual_ulp_capable(&hw->sli)) {
442 if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
443 sli_get_is_ulp_enabled(&hw->sli, 1)) {
444 hw->ulp_start = 0;
445 hw->ulp_max = 1;
446 } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
447 hw->ulp_start = 0;
448 hw->ulp_max = 0;
449 } else {
450 hw->ulp_start = 1;
451 hw->ulp_max = 1;
452 }
453 } else {
454 if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
455 hw->ulp_start = 0;
456 hw->ulp_max = 0;
457 } else {
458 hw->ulp_start = 1;
459 hw->ulp_max = 1;
460 }
461 }
462 ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
463 hw->ulp_start, hw->ulp_max);
464 hw->config.queue_topology = hw_global.queue_topology_string;
465
466 hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
467
468 hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
469 hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
470 hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
471 hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
472 hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
473
474 /* Verify qtop configuration against driver supported configuration */
475 if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
476 ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
477 OCE_HW_MAX_NUM_MRQ_PAIRS);
478 return OCS_HW_RTN_ERROR;
479 }
480
481 if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
482 ocs_log_crit(hw->os, "Max supported EQs = %d\n",
483 OCS_HW_MAX_NUM_EQ);
484 return OCS_HW_RTN_ERROR;
485 }
486
487 if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
488 ocs_log_crit(hw->os, "Max supported CQs = %d\n",
489 OCS_HW_MAX_NUM_CQ);
490 return OCS_HW_RTN_ERROR;
491 }
492
493 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
494 ocs_log_crit(hw->os, "Max supported WQs = %d\n",
495 OCS_HW_MAX_NUM_WQ);
496 return OCS_HW_RTN_ERROR;
497 }
498
499 if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
500 ocs_log_crit(hw->os, "Max supported MQs = %d\n",
501 OCS_HW_MAX_NUM_MQ);
502 return OCS_HW_RTN_ERROR;
503 }
504
505 return OCS_HW_RTN_SUCCESS;
506 }
507
508 /**
509 * @ingroup devInitShutdown
510 * @brief Allocate memory structures to prepare for the device operation.
511 *
512 * @par Description
513 * Allocates memory structures needed by the device and prepares the device
514 * for operation.
515 * @n @n @b Note: This function may be called more than once (for example, at
516 * initialization and then after a reset), but the size of the internal resources
517 * may not be changed without tearing down the HW (ocs_hw_teardown()).
518 *
519 * @param hw Hardware context allocated by the caller.
520 *
521 * @return Returns 0 on success, or a non-zero value on failure.
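*
* @par Example
* Typical bring-up order, sketched with error handling elided (any
* ocs_hw_set() configuration would go between the two calls):
* @code
* ocs_hw_setup(hw, ocs, SLI4_PORT_TYPE_FC);
* ocs_hw_init(hw);
* @endcode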
522 */
523 ocs_hw_rtn_e
524 ocs_hw_init(ocs_hw_t *hw)
525 {
526 ocs_hw_rtn_e rc;
527 uint32_t i = 0;
528 uint8_t buf[SLI4_BMBX_SIZE];
529 uint32_t max_rpi;
530 int rem_count;
531 int written_size = 0;
532 uint32_t count;
533 char prop_buf[32];
534 uint32_t ramdisc_blocksize = 512;
535 uint32_t q_count = 0;
536 /*
537 * Make sure the command lists are empty. If this is start-of-day,
538 * they'll be empty since they were just initialized in ocs_hw_setup.
539 * If we've just gone through a reset, the command and command pending
540 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
541 */
542 ocs_lock(&hw->cmd_lock);
543 if (!ocs_list_empty(&hw->cmd_head)) {
544 ocs_log_test(hw->os, "command found on cmd list\n");
545 ocs_unlock(&hw->cmd_lock);
546 return OCS_HW_RTN_ERROR;
547 }
548 if (!ocs_list_empty(&hw->cmd_pending)) {
549 ocs_log_test(hw->os, "command found on pending list\n");
550 ocs_unlock(&hw->cmd_lock);
551 return OCS_HW_RTN_ERROR;
552 }
553 ocs_unlock(&hw->cmd_lock);
554
555 /* Free RQ buffers if previously allocated */
556 ocs_hw_rx_free(hw);
557
558 /*
559 * The IO queues must be initialized here for the reset case. The
560 * ocs_hw_init_io() function will re-add the IOs to the free list.
561 * The cmd_head list should be OK since we free all entries in
562 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
563 */
564
565 /* If we are in this function due to a reset, there may be stale items
566 * on lists that need to be removed. Clean them up.
567 */
568 rem_count=0;
569 if (ocs_list_valid(&hw->io_wait_free)) {
570 while ((!ocs_list_empty(&hw->io_wait_free))) {
571 rem_count++;
572 ocs_list_remove_head(&hw->io_wait_free);
573 }
574 if (rem_count > 0) {
575 ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
576 }
577 }
578 rem_count=0;
579 if (ocs_list_valid(&hw->io_inuse)) {
580 while ((!ocs_list_empty(&hw->io_inuse))) {
581 rem_count++;
582 ocs_list_remove_head(&hw->io_inuse);
583 }
584 if (rem_count > 0) {
585 ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
586 }
587 }
588 rem_count=0;
589 if (ocs_list_valid(&hw->io_free)) {
590 while ((!ocs_list_empty(&hw->io_free))) {
591 rem_count++;
592 ocs_list_remove_head(&hw->io_free);
593 }
594 if (rem_count > 0) {
595 ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
596 }
597 }
598 if (ocs_list_valid(&hw->io_port_owned)) {
599 while ((!ocs_list_empty(&hw->io_port_owned))) {
600 ocs_list_remove_head(&hw->io_port_owned);
601 }
602 }
603 ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
604 ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
605 ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
606 ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
607 ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
608 ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
609
610 /* If MRQ is not required, make sure we don't request the feature. */
611 if (hw->config.n_rq == 1) {
612 hw->sli.config.features.flag.mrqp = FALSE;
613 }
614
615 if (sli_init(&hw->sli)) {
616 ocs_log_err(hw->os, "SLI failed to initialize\n");
617 return OCS_HW_RTN_ERROR;
618 }
619
620 /*
621 * Enable the auto xfer rdy feature if requested.
622 */
623 hw->auto_xfer_rdy_enabled = FALSE;
624 if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
625 hw->config.auto_xfer_rdy_size > 0) {
626 if (hw->config.esoc) {
627 if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
628 ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
629 }
630 written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
631 } else {
632 written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
633 }
634 if (written_size) {
635 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
636 if (rc != OCS_HW_RTN_SUCCESS) {
637 ocs_log_err(hw->os, "config auto xfer rdy failed\n");
638 return rc;
639 }
640 }
641 hw->auto_xfer_rdy_enabled = TRUE;
642
643 if (hw->config.auto_xfer_rdy_t10_enable) {
644 rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
645 if (rc != OCS_HW_RTN_SUCCESS) {
646 ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
647 return rc;
648 }
649 }
650 }
651
652 if (hw->sliport_healthcheck) {
653 rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
654 if (rc != OCS_HW_RTN_SUCCESS) {
655 ocs_log_err(hw->os, "Enabling SLI Port Health Check failed\n");
656 return rc;
657 }
658 }
659
660 /*
661 * Set FDT transfer hint, only works on Lancer
662 */
663 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
664 /*
665 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
666 * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature.
667 */
668 ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
669 }
670
671 /*
672 * Verify that we have not exceeded any queue sizes
673 */
674 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
675 OCS_HW_MAX_NUM_EQ);
676 if (hw->config.n_eq > q_count) {
677 ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
678 hw->config.n_eq, q_count);
679 return OCS_HW_RTN_ERROR;
680 }
681
682 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
683 OCS_HW_MAX_NUM_CQ);
684 if (hw->config.n_cq > q_count) {
685 ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
686 hw->config.n_cq, q_count);
687 return OCS_HW_RTN_ERROR;
688 }
689
690 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
691 OCS_HW_MAX_NUM_MQ);
692 if (hw->config.n_mq > q_count) {
693 ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
694 hw->config.n_mq, q_count);
695 return OCS_HW_RTN_ERROR;
696 }
697
698 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
699 OCS_HW_MAX_NUM_RQ);
700 if (hw->config.n_rq > q_count) {
701 ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
702 hw->config.n_rq, q_count);
703 return OCS_HW_RTN_ERROR;
704 }
705
706 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
707 OCS_HW_MAX_NUM_WQ);
708 if (hw->config.n_wq > q_count) {
709 ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
710 hw->config.n_wq, q_count);
711 return OCS_HW_RTN_ERROR;
712 }
713
714 /* zero the hashes */
715 ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
716 ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
717 OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
718
719 ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
720 ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
721 OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
722
723 ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
724 ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
725 OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
726
727 rc = ocs_hw_init_queues(hw, hw->qtop);
728 if (rc != OCS_HW_RTN_SUCCESS) {
729 return rc;
730 }
731
732 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
733 i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
734 if (i) {
735 ocs_dma_t payload_memory;
736
737 rc = OCS_HW_RTN_ERROR;
738
739 if (hw->rnode_mem.size) {
740 ocs_dma_free(hw->os, &hw->rnode_mem);
741 }
742
743 if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
744 ocs_log_err(hw->os, "remote node memory allocation fail\n");
745 return OCS_HW_RTN_NO_MEMORY;
746 }
747
748 payload_memory.size = 0;
749 if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
750 &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
751 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
752
753 if (payload_memory.size != 0) {
754 /* The command was non-embedded - need to free the dma buffer */
755 ocs_dma_free(hw->os, &payload_memory);
756 }
757 }
758
759 if (rc != OCS_HW_RTN_SUCCESS) {
760 ocs_log_err(hw->os, "header template registration failed\n");
761 return rc;
762 }
763 }
764
765 /* Allocate and post RQ buffers */
766 rc = ocs_hw_rx_allocate(hw);
767 if (rc) {
768 ocs_log_err(hw->os, "rx_allocate failed\n");
769 return rc;
770 }
771
772 /* Populate hw->seq_free_list */
773 if (hw->seq_pool == NULL) {
774 uint32_t count = 0;
775 uint32_t i;
776
777 /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
778 for (i = 0; i < hw->hw_rq_count; i++) {
779 count += hw->hw_rq[i]->entry_count;
780 }
781
782 hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
783 if (hw->seq_pool == NULL) {
784 ocs_log_err(hw->os, "malloc seq_pool failed\n");
785 return OCS_HW_RTN_NO_MEMORY;
786 }
787 }
788
789 if (ocs_hw_rx_post(hw)) {
790 ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
791 }
792
793 /* Allocate rpi_ref if not previously allocated */
794 if (hw->rpi_ref == NULL) {
795 hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
796 OCS_M_ZERO | OCS_M_NOWAIT);
797 if (hw->rpi_ref == NULL) {
798 ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
799 return OCS_HW_RTN_NO_MEMORY;
800 }
801 }
802
803 for (i = 0; i < max_rpi; i ++) {
804 ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
805 ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
806 }
807
808 ocs_memset(hw->domains, 0, sizeof(hw->domains));
809
810 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
811 if (hw->workaround.override_fcfi) {
812 hw->first_domain_idx = -1;
813 }
814
815 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
816
817 /* Register a FCFI to allow unsolicited frames to be routed to the driver */
818 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
819 if (hw->hw_mrq_count) {
820 ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
821
822 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
823 if (rc != OCS_HW_RTN_SUCCESS) {
824 ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
825 return rc;
826 }
827
828 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
829 if (rc != OCS_HW_RTN_SUCCESS) {
830 ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
831 return rc;
832 }
833 } else {
834 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
835
836 ocs_log_debug(hw->os, "using REG_FCFI standard\n");
837
838 /* Set the filter match/mask values from hw's filter_def values */
839 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
840 rq_cfg[i].rq_id = 0xffff;
841 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
842 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
843 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
844 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
845 }
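/* The shifts above unpack each 32-bit filter_def word, which is laid out
 * as: bits [7:0] r_ctl_mask, [15:8] r_ctl_match, [23:16] type_mask and
 * [31:24] type_match.
 */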
846
847 /*
848 * Update the rq_id's of the FCF configuration (don't update more than the number
849 * of rq_cfg elements)
850 */
851 for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
852 hw_rq_t *rq = hw->hw_rq[i];
853 uint32_t j;
854 for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
855 uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
856 if (mask & (1U << j)) {
857 rq_cfg[j].rq_id = rq->hdr->id;
858 ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
859 j, hw->config.filter_def[j], i, rq->hdr->id);
860 }
861 }
862 }
863
864 rc = OCS_HW_RTN_ERROR;
865
866 if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
867 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
868 }
869
870 if (rc != OCS_HW_RTN_SUCCESS) {
871 ocs_log_err(hw->os, "FCFI registration failed\n");
872 return rc;
873 }
874 hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
875 }
876 }
877
878 /*
879 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
880 * thus the pool allocation size of 64k)
881 */
882 rc = ocs_hw_reqtag_init(hw);
883 if (rc) {
884 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
885 return rc;
886 }
887
888 rc = ocs_hw_setup_io(hw);
889 if (rc) {
890 ocs_log_err(hw->os, "IO allocation failure\n");
891 return rc;
892 }
893
894 rc = ocs_hw_init_io(hw);
895 if (rc) {
896 ocs_log_err(hw->os, "IO initialization failure\n");
897 return rc;
898 }
899
900 ocs_queue_history_init(hw->os, &hw->q_hist);
901
902 /* get hw link config; polling, so callback will be called immediately */
903 hw->linkcfg = OCS_HW_LINKCFG_NA;
904 ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
905
906 /* if lancer ethernet, ethernet ports need to be enabled */
907 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
908 (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
909 if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
910 /* log warning but continue */
911 ocs_log_err(hw->os, "Failed to set ethernet license\n");
912 }
913 }
914
915 /* Set the DIF seed - only for lancer right now */
916 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
917 ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
918 ocs_log_err(hw->os, "Failed to set DIF seed value\n");
919 return rc;
920 }
921
922 /* Set the DIF mode - skyhawk only */
923 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
924 sli_get_dif_capable(&hw->sli)) {
925 rc = ocs_hw_set_dif_mode(hw);
926 if (rc != OCS_HW_RTN_SUCCESS) {
927 ocs_log_err(hw->os, "Failed to set DIF mode value\n");
928 return rc;
929 }
930 }
931
932 /*
933 * Arming the EQ allows interrupts to be generated when CQ completions write EQ entries.
934 */
935 for (i = 0; i < hw->eq_count; i++) {
936 sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
937 }
938
939 /*
940 * Initialize RQ hash
941 */
942 for (i = 0; i < hw->rq_count; i++) {
943 ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
944 }
945
946 /*
947 * Initialize WQ hash
948 */
949 for (i = 0; i < hw->wq_count; i++) {
950 ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
951 }
952
953 /*
954 * Arming the CQ allows (for example) MQ completions to write CQ entries.
955 */
956 for (i = 0; i < hw->cq_count; i++) {
957 ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
958 sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
959 }
960
961 /* record the fact that the queues are functional */
962 hw->state = OCS_HW_STATE_ACTIVE;
963
964 /* Note: Must be after the IOs are set up and the state is active */
965 if (ocs_hw_rqpair_init(hw)) {
966 ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
967 }
968
969 /* finally kick off periodic timer to check for timed out target WQEs */
970 if (hw->config.emulate_tgt_wqe_timeout) {
971 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
972 OCS_HW_WQ_TIMER_PERIOD_MS);
973 }
974
975 /*
976 * Allocate HW IOs for send frame: one for each class 1 WQ, or if there
977 * are none of those, one for WQ[0].
978 */
979 if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
980 for (i = 0; i < count; i++) {
981 hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
982 wq->send_frame_io = ocs_hw_io_alloc(hw);
983 if (wq->send_frame_io == NULL) {
984 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
985 }
986 }
987 } else {
988 hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
989 if (hw->hw_wq[0]->send_frame_io == NULL) {
990 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
991 }
992 }
993
994 /* Initialize send frame sequence id */
995 ocs_atomic_init(&hw->send_frame_seq_id, 0);
996
997 /* Initialize watchdog timer if enabled by user */
998 hw->expiration_logged = 0;
999 if (hw->watchdog_timeout) {
1000 if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
1001 ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
1002 } else if (!ocs_hw_config_watchdog_timer(hw)) {
1003 ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
1004 }
1005 }
1006
1007 if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
1008 ocs_log_err(hw->os, "domain node memory allocation fail\n");
1009 return OCS_HW_RTN_NO_MEMORY;
1010 }
1011
1012 if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
1013 ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
1014 return OCS_HW_RTN_NO_MEMORY;
1015 }
1016
1017 if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
1018 SLI4_MIN_LOOP_MAP_BYTES, 4)) {
1019 ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
1020 }
1021
1022 return OCS_HW_RTN_SUCCESS;
1023 }
1024
1025 /**
1026 * @brief Configure Multi-RQ
1027 *
1028 * @param hw Hardware context allocated by the caller.
1029 * @param mode 1 to set MRQ filters and 0 to set FCFI index
1030 * @param vlanid valid in mode 0
1031 * @param fcf_index valid in mode 0
1032 *
1033 * @return Returns 0 on success, or a non-zero value on failure.
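*
* @note When MRQs are in use, ocs_hw_init() invokes this twice: once with
* SLI4_CMD_REG_FCFI_SET_FCFI_MODE and once with SLI4_CMD_REG_FCFI_SET_MRQ_MODE.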
1034 */
1035 static int32_t
1036 ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
1037 {
1038 uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
1039 hw_rq_t *rq;
1040 sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
1041 uint32_t i, j;
1042 sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
1043 int32_t rc;
1044
1045 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1046 goto issue_cmd;
1047 }
1048
1049 /* Set the filter match/mask values from hw's filter_def values */
1050 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1051 rq_filter[i].rq_id = 0xffff;
1052 rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
1053 rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
1054 rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
1055 rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
1056 }
1057
1058 /* Assign an RQ id to each filter entry in use and build the MRQ bitmask */
1059 for (i = 0; i < hw->hw_rq_count; i++) {
1060 rq = hw->hw_rq[i];
1061 for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
1062 if (rq->filter_mask & (1U << j)) {
1063 if (rq_filter[j].rq_id != 0xffff) {
1064 /* Already used. Bail out if this is not the RQ set case. */
1065 if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
1066 ocs_log_err(hw->os, "Wrong queue topology.\n");
1067 return OCS_HW_RTN_ERROR;
1068 }
1069 continue;
1070 }
1071
1072 if (rq->is_mrq) {
1073 rq_filter[j].rq_id = rq->base_mrq_id;
1074 mrq_bitmask |= (1U << j);
1075 } else {
1076 rq_filter[j].rq_id = rq->hdr->id;
1077 }
1078 }
1079 }
1080 }
1081
1082 issue_cmd:
1083 /* Invoke REG_FCFI_MRQ */
1084 rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
1085 buf, /* buf */
1086 SLI4_BMBX_SIZE, /* size */
1087 mode, /* mode */
1088 fcf_index, /* fcf_index */
1089 vlanid, /* vlan_id */
1090 hw->config.rq_selection_policy, /* RQ selection policy*/
1091 mrq_bitmask, /* MRQ bitmask */
1092 hw->hw_mrq_count, /* num_mrqs */
1093 rq_filter); /* RQ filter */
1094 if (rc == 0) {
1095 ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
1096 return OCS_HW_RTN_ERROR;
1097 }
1098
1099 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
1100
1101 rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
1102
1103 if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
1104 ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
1105 rsp->hdr.command, rsp->hdr.status);
1106 return OCS_HW_RTN_ERROR;
1107 }
1108
1109 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1110 hw->fcf_indicator = rsp->fcfi;
1111 }
1112 return 0;
1113 }
1114
1115 /**
1116 * @brief Callback function for getting linkcfg during HW initialization.
1117 *
1118 * @param status Status of the linkcfg get operation.
1119 * @param value Link configuration enum to which the link configuration is set.
1120 * @param arg Callback argument (ocs_hw_t *).
1121 *
1122 * @return None.
1123 */
1124 static void
1125 ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
1126 {
1127 ocs_hw_t *hw = (ocs_hw_t *)arg;
1128 if (status == 0) {
1129 hw->linkcfg = (ocs_hw_linkcfg_e)value;
1130 } else {
1131 hw->linkcfg = OCS_HW_LINKCFG_NA;
1132 }
1133 ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
1134 }
1135
1136 /**
1137 * @ingroup devInitShutdown
1138 * @brief Tear down the Hardware Abstraction Layer module.
1139 *
1140 * @par Description
1141 * Frees memory structures needed by the device, and shuts down the device. Does
1142 * not free the HW context memory (which is done by the caller).
1143 *
1144 * @param hw Hardware context allocated by the caller.
1145 *
1146 * @return Returns 0 on success, or a non-zero value on failure.
1147 */
1148 ocs_hw_rtn_e
1149 ocs_hw_teardown(ocs_hw_t *hw)
1150 {
1151 uint32_t i = 0;
1152 uint32_t iters = 10;/*XXX*/
1153 uint32_t max_rpi;
1154 uint32_t destroy_queues;
1155 uint32_t free_memory;
1156
1157 if (!hw) {
1158 ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
1159 return OCS_HW_RTN_ERROR;
1160 }
1161
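/* Only destroy the SLI queues if they were brought up (state ACTIVE), and
 * only free queue memory if setup progressed past UNINITIALIZED.
 */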
1162 destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
1163 free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
1164
1165 /* shutdown target wqe timer */
1166 shutdown_target_wqe_timer(hw);
1167
1168 /* Cancel watchdog timer if enabled */
1169 if (hw->watchdog_timeout) {
1170 hw->watchdog_timeout = 0;
1171 ocs_hw_config_watchdog_timer(hw);
1172 }
1173
1174 /* Cancel Sliport Healthcheck */
1175 if (hw->sliport_healthcheck) {
1176 hw->sliport_healthcheck = 0;
1177 ocs_hw_config_sli_port_health_check(hw, 0, 0);
1178 }
1179
1180 if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
1181 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1182
1183 ocs_hw_flush(hw);
1184
1185 /* If there are outstanding commands, wait for them to complete */
1186 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1187 ocs_udelay(10000);
1188 ocs_hw_flush(hw);
1189 iters--;
1190 }
1191
1192 if (ocs_list_empty(&hw->cmd_head)) {
1193 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1194 } else {
1195 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1196 }
1197
1198 /* Cancel any remaining commands */
1199 ocs_hw_command_cancel(hw);
1200 } else {
1201 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1202 }
1203
1204 ocs_lock_free(&hw->cmd_lock);
1205
1206 /* Free unregistered RPI if workaround is in force */
1207 if (hw->workaround.use_unregistered_rpi) {
1208 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
1209 }
1210
1211 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1212 if (hw->rpi_ref) {
1213 for (i = 0; i < max_rpi; i++) {
1214 if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
1215 ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
1216 i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
1217 }
1218 }
1219 ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
1220 hw->rpi_ref = NULL;
1221 }
1222
1223 ocs_dma_free(hw->os, &hw->rnode_mem);
1224
1225 if (hw->io) {
1226 for (i = 0; i < hw->config.n_io; i++) {
1227 if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
1228 (hw->io[i]->sgl->virt != NULL)) {
1229 if (hw->io[i]->is_port_owned) {
1230 ocs_lock_free(&hw->io[i]->axr_lock);
1231 }
1232 ocs_dma_free(hw->os, hw->io[i]->sgl);
1233 }
1234 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
1235 hw->io[i] = NULL;
1236 }
1237 ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
1238 hw->wqe_buffs = NULL;
1239 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
1240 hw->io = NULL;
1241 }
1242
1243 ocs_dma_free(hw->os, &hw->xfer_rdy);
1244 ocs_dma_free(hw->os, &hw->dump_sges);
1245 ocs_dma_free(hw->os, &hw->loop_map);
1246
1247 ocs_lock_free(&hw->io_lock);
1248 ocs_lock_free(&hw->io_abort_lock);
1249
1250 for (i = 0; i < hw->wq_count; i++) {
1251 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
1252 }
1253
1254 for (i = 0; i < hw->rq_count; i++) {
1255 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
1256 }
1257
1258 for (i = 0; i < hw->mq_count; i++) {
1259 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
1260 }
1261
1262 for (i = 0; i < hw->cq_count; i++) {
1263 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
1264 }
1265
1266 for (i = 0; i < hw->eq_count; i++) {
1267 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
1268 }
1269
1270 ocs_hw_qtop_free(hw->qtop);
1271
1272 /* Free rq buffers */
1273 ocs_hw_rx_free(hw);
1274
1275 hw_queue_teardown(hw);
1276
1277 ocs_hw_rqpair_teardown(hw);
1278
1279 if (sli_teardown(&hw->sli)) {
1280 ocs_log_err(hw->os, "SLI teardown failed\n");
1281 }
1282
1283 ocs_queue_history_free(&hw->q_hist);
1284
1285 /* record the fact that the queues are non-functional */
1286 hw->state = OCS_HW_STATE_UNINITIALIZED;
1287
1288 /* free sequence free pool */
1289 ocs_array_free(hw->seq_pool);
1290 hw->seq_pool = NULL;
1291
1292 /* free hw_wq_callback pool */
1293 ocs_pool_free(hw->wq_reqtag_pool);
1294
1295 ocs_dma_free(hw->os, &hw->domain_dmem);
1296 ocs_dma_free(hw->os, &hw->fcf_dmem);
1297 /* Mark HW setup as not having been called */
1298 hw->hw_setup_called = FALSE;
1299
1300 return OCS_HW_RTN_SUCCESS;
1301 }
1302
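/**
 * @ingroup devInitShutdown
 * @brief Reset the HW and the underlying SLI port.
 *
 * @par Description
 * Issues a function-level or firmware-level reset, cancels outstanding
 * commands and IOs, resets all queues, and re-applies the run-time
 * workarounds. On return, the HW is left in the
 * OCS_HW_STATE_QUEUES_ALLOCATED state; the caller must call ocs_hw_init()
 * to make it active again.
 *
 * @param hw Hardware context allocated by the caller.
 * @param reset Reset level (OCS_HW_RESET_FUNCTION or OCS_HW_RESET_FIRMWARE).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */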
1303 ocs_hw_rtn_e
1304 ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
1305 {
1306 uint32_t i;
1307 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1308 uint32_t iters;
1309 ocs_hw_state_e prev_state = hw->state;
1310
1311 if (hw->state != OCS_HW_STATE_ACTIVE) {
1312 ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
1313 }
1314
1315 hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
1316
1317 /* shutdown target wqe timer */
1318 shutdown_target_wqe_timer(hw);
1319
1320 ocs_hw_flush(hw);
1321
1322 /*
1323 * If a mailbox command requiring a DMA is outstanding (e.g. SFP/DDM),
1324 * then the FW will UE when the reset is issued. So attempt to complete
1325 * all mailbox commands.
1326 */
1327 iters = 10;
1328 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1329 ocs_udelay(10000);
1330 ocs_hw_flush(hw);
1331 iters--;
1332 }
1333
1334 if (ocs_list_empty(&hw->cmd_head)) {
1335 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1336 } else {
1337 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1338 }
1339
1340 /* Reset the chip */
1341 switch(reset) {
1342 case OCS_HW_RESET_FUNCTION:
1343 ocs_log_debug(hw->os, "issuing function level reset\n");
1344 if (sli_reset(&hw->sli)) {
1345 ocs_log_err(hw->os, "sli_reset failed\n");
1346 rc = OCS_HW_RTN_ERROR;
1347 }
1348 break;
1349 case OCS_HW_RESET_FIRMWARE:
1350 ocs_log_debug(hw->os, "issuing firmware reset\n");
1351 if (sli_fw_reset(&hw->sli)) {
1352 ocs_log_err(hw->os, "sli_soft_reset failed\n");
1353 rc = OCS_HW_RTN_ERROR;
1354 }
1355 /*
1356 * Because the FW reset leaves the FW in a non-running state,
1357 * follow that with a regular reset.
1358 */
1359 ocs_log_debug(hw->os, "issuing function level reset\n");
1360 if (sli_reset(&hw->sli)) {
1361 ocs_log_err(hw->os, "sli_reset failed\n");
1362 rc = OCS_HW_RTN_ERROR;
1363 }
1364 break;
1365 default:
1366 ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
1367 hw->state = prev_state;
1368 return OCS_HW_RTN_ERROR;
1369 }
1370
1371 /* Not safe to walk command/io lists unless they've been initialized */
1372 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1373 ocs_hw_command_cancel(hw);
1374
1375 /* Clean up the inuse list, the free list and the wait free list */
1376 ocs_hw_io_cancel(hw);
1377
1378 ocs_memset(hw->domains, 0, sizeof(hw->domains));
1379 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
1380
1381 ocs_hw_link_event_init(hw);
1382
1383 ocs_lock(&hw->io_lock);
1384 /* The io lists should be empty, but remove any that didn't get cleaned up. */
1385 while (!ocs_list_empty(&hw->io_timed_wqe)) {
1386 ocs_list_remove_head(&hw->io_timed_wqe);
1387 }
1388 /* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
1389
1390 while (!ocs_list_empty(&hw->io_free)) {
1391 ocs_list_remove_head(&hw->io_free);
1392 }
1393 while (!ocs_list_empty(&hw->io_wait_free)) {
1394 ocs_list_remove_head(&hw->io_wait_free);
1395 }
1396
1397 /* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
1398 ocs_hw_reqtag_reset(hw);
1399
1400 ocs_unlock(&hw->io_lock);
1401 }
1402
1403 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1404 for (i = 0; i < hw->wq_count; i++) {
1405 sli_queue_reset(&hw->sli, &hw->wq[i]);
1406 }
1407
1408 for (i = 0; i < hw->rq_count; i++) {
1409 sli_queue_reset(&hw->sli, &hw->rq[i]);
1410 }
1411
1412 for (i = 0; i < hw->hw_rq_count; i++) {
1413 hw_rq_t *rq = hw->hw_rq[i];
1414 if (rq->rq_tracker != NULL) {
1415 uint32_t j;
1416
1417 for (j = 0; j < rq->entry_count; j++) {
1418 rq->rq_tracker[j] = NULL;
1419 }
1420 }
1421 }
1422
1423 for (i = 0; i < hw->mq_count; i++) {
1424 sli_queue_reset(&hw->sli, &hw->mq[i]);
1425 }
1426
1427 for (i = 0; i < hw->cq_count; i++) {
1428 sli_queue_reset(&hw->sli, &hw->cq[i]);
1429 }
1430
1431 for (i = 0; i < hw->eq_count; i++) {
1432 sli_queue_reset(&hw->sli, &hw->eq[i]);
1433 }
1434
1435 /* Free rq buffers */
1436 ocs_hw_rx_free(hw);
1437
1438 /* Teardown the HW queue topology */
1439 hw_queue_teardown(hw);
1440 } else {
1441 /* Free rq buffers */
1442 ocs_hw_rx_free(hw);
1443 }
1444
1445 /*
1446 * Re-apply the run-time workarounds after clearing the SLI config
1447 * fields in sli_reset.
1448 */
1449 ocs_hw_workaround_setup(hw);
1450 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
1451
1452 return rc;
1453 }
1454
1455 int32_t
1456 ocs_hw_get_num_eq(ocs_hw_t *hw)
1457 {
1458 return hw->eq_count;
1459 }
1460
1461 static int32_t
1462 ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
1463 {
1464 /* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
1465 * No further explanation is given in the document.
1466 */
1467 return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
1468 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
1469 }
1470
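/**
 * @brief Read a scalar HW property.
 *
 * @par Description
 * Copies the requested property into the caller-supplied value. A minimal
 * usage sketch:
 * @code
 * uint32_t max_io = 0;
 * if (ocs_hw_get(hw, OCS_HW_MAX_IO, &max_io) == OCS_HW_RTN_SUCCESS) {
 *         ocs_log_debug(hw->os, "max io %d\n", max_io);
 * }
 * @endcode
 *
 * @param hw Hardware context allocated by the caller.
 * @param prop Property to read.
 * @param value Pointer to the returned value.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */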
1471 ocs_hw_rtn_e
1472 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1473 {
1474 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1475 int32_t tmp;
1476
1477 if (!value) {
1478 return OCS_HW_RTN_ERROR;
1479 }
1480
1481 *value = 0;
1482
1483 switch (prop) {
1484 case OCS_HW_N_IO:
1485 *value = hw->config.n_io;
1486 break;
1487 case OCS_HW_N_SGL:
1488 *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
1489 break;
1490 case OCS_HW_MAX_IO:
1491 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1492 break;
1493 case OCS_HW_MAX_NODES:
1494 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1495 break;
1496 case OCS_HW_MAX_RQ_ENTRIES:
1497 *value = hw->num_qentries[SLI_QTYPE_RQ];
1498 break;
1499 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1500 *value = hw->config.rq_default_buffer_size;
1501 break;
1502 case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1503 *value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1504 break;
1505 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1506 *value = hw->config.auto_xfer_rdy_xri_cnt;
1507 break;
1508 case OCS_HW_AUTO_XFER_RDY_SIZE:
1509 *value = hw->config.auto_xfer_rdy_size;
1510 break;
1511 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1512 switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1513 case 0:
1514 *value = 512;
1515 break;
1516 case 1:
1517 *value = 1024;
1518 break;
1519 case 2:
1520 *value = 2048;
1521 break;
1522 case 3:
1523 *value = 4096;
1524 break;
1525 case 4:
1526 *value = 520;
1527 break;
1528 default:
1529 *value = 0;
1530 rc = OCS_HW_RTN_ERROR;
1531 break;
1532 }
1533 break;
1534 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1535 *value = hw->config.auto_xfer_rdy_t10_enable;
1536 break;
1537 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1538 *value = hw->config.auto_xfer_rdy_p_type;
1539 break;
1540 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1541 *value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1542 break;
1543 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1544 *value = hw->config.auto_xfer_rdy_app_tag_valid;
1545 break;
1546 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1547 *value = hw->config.auto_xfer_rdy_app_tag_value;
1548 break;
1549 case OCS_HW_MAX_SGE:
1550 *value = sli_get_max_sge(&hw->sli);
1551 break;
1552 case OCS_HW_MAX_SGL:
1553 *value = sli_get_max_sgl(&hw->sli);
1554 break;
1555 case OCS_HW_TOPOLOGY:
1556 /*
1557 * Infer link.status based on link.speed.
1558 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1559 */
1560 if (hw->link.speed == 0) {
1561 *value = OCS_HW_TOPOLOGY_NONE;
1562 break;
1563 }
1564 switch (hw->link.topology) {
1565 case SLI_LINK_TOPO_NPORT:
1566 *value = OCS_HW_TOPOLOGY_NPORT;
1567 break;
1568 case SLI_LINK_TOPO_LOOP:
1569 *value = OCS_HW_TOPOLOGY_LOOP;
1570 break;
1571 case SLI_LINK_TOPO_NONE:
1572 *value = OCS_HW_TOPOLOGY_NONE;
1573 break;
1574 default:
1575 ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1576 rc = OCS_HW_RTN_ERROR;
1577 break;
1578 }
1579 break;
1580 case OCS_HW_CONFIG_TOPOLOGY:
1581 *value = hw->config.topology;
1582 break;
1583 case OCS_HW_LINK_SPEED:
1584 *value = hw->link.speed;
1585 break;
1586 case OCS_HW_LINK_CONFIG_SPEED:
1587 switch (hw->config.speed) {
1588 case FC_LINK_SPEED_10G:
1589 *value = 10000;
1590 break;
1591 case FC_LINK_SPEED_AUTO_16_8_4:
1592 *value = 0;
1593 break;
1594 case FC_LINK_SPEED_2G:
1595 *value = 2000;
1596 break;
1597 case FC_LINK_SPEED_4G:
1598 *value = 4000;
1599 break;
1600 case FC_LINK_SPEED_8G:
1601 *value = 8000;
1602 break;
1603 case FC_LINK_SPEED_16G:
1604 *value = 16000;
1605 break;
1606 case FC_LINK_SPEED_32G:
1607 *value = 32000;
1608 break;
1609 default:
1610 ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1611 rc = OCS_HW_RTN_ERROR;
1612 break;
1613 }
1614 break;
1615 case OCS_HW_IF_TYPE:
1616 *value = sli_get_if_type(&hw->sli);
1617 break;
1618 case OCS_HW_SLI_REV:
1619 *value = sli_get_sli_rev(&hw->sli);
1620 break;
1621 case OCS_HW_SLI_FAMILY:
1622 *value = sli_get_sli_family(&hw->sli);
1623 break;
1624 case OCS_HW_DIF_CAPABLE:
1625 *value = sli_get_dif_capable(&hw->sli);
1626 break;
1627 case OCS_HW_DIF_SEED:
1628 *value = hw->config.dif_seed;
1629 break;
1630 case OCS_HW_DIF_MODE:
1631 *value = hw->config.dif_mode;
1632 break;
1633 case OCS_HW_DIF_MULTI_SEPARATE:
1634 /* Lancer supports multiple DIF separates */
1635 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1636 *value = TRUE;
1637 } else {
1638 *value = FALSE;
1639 }
1640 break;
1641 case OCS_HW_DUMP_MAX_SIZE:
1642 *value = hw->dump_size;
1643 break;
1644 case OCS_HW_DUMP_READY:
1645 *value = sli_dump_is_ready(&hw->sli);
1646 break;
1647 case OCS_HW_DUMP_PRESENT:
1648 *value = sli_dump_is_present(&hw->sli);
1649 break;
1650 case OCS_HW_RESET_REQUIRED:
1651 tmp = sli_reset_required(&hw->sli);
1652 if (tmp < 0) {
1653 rc = OCS_HW_RTN_ERROR;
1654 } else {
1655 *value = tmp;
1656 }
1657 break;
1658 case OCS_HW_FW_ERROR:
1659 *value = sli_fw_error_status(&hw->sli);
1660 break;
1661 case OCS_HW_FW_READY:
1662 *value = sli_fw_ready(&hw->sli);
1663 break;
1664 case OCS_HW_FW_TIMED_OUT:
1665 *value = ocs_hw_get_fw_timed_out(hw);
1666 break;
1667 case OCS_HW_HIGH_LOGIN_MODE:
1668 *value = sli_get_hlm_capable(&hw->sli);
1669 break;
1670 case OCS_HW_PREREGISTER_SGL:
1671 *value = sli_get_sgl_preregister_required(&hw->sli);
1672 break;
1673 case OCS_HW_HW_REV1:
1674 *value = sli_get_hw_revision(&hw->sli, 0);
1675 break;
1676 case OCS_HW_HW_REV2:
1677 *value = sli_get_hw_revision(&hw->sli, 1);
1678 break;
1679 case OCS_HW_HW_REV3:
1680 *value = sli_get_hw_revision(&hw->sli, 2);
1681 break;
1682 case OCS_HW_LINKCFG:
1683 *value = hw->linkcfg;
1684 break;
1685 case OCS_HW_ETH_LICENSE:
1686 *value = hw->eth_license;
1687 break;
1688 case OCS_HW_LINK_MODULE_TYPE:
1689 *value = sli_get_link_module_type(&hw->sli);
1690 break;
1691 case OCS_HW_NUM_CHUTES:
1692 *value = ocs_hw_get_num_chutes(hw);
1693 break;
1694 case OCS_HW_DISABLE_AR_TGT_DIF:
1695 *value = hw->workaround.disable_ar_tgt_dif;
1696 break;
1697 case OCS_HW_EMULATE_I_ONLY_AAB:
1698 *value = hw->config.i_only_aab;
1699 break;
1700 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1701 *value = hw->config.emulate_tgt_wqe_timeout;
1702 break;
1703 case OCS_HW_VPD_LEN:
1704 *value = sli_get_vpd_len(&hw->sli);
1705 break;
1706 case OCS_HW_SGL_CHAINING_CAPABLE:
1707 *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1708 break;
1709 case OCS_HW_SGL_CHAINING_ALLOWED:
1710 /*
1711 * SGL Chaining is allowed in the following cases:
1712 * 1. Lancer with host SGL Lists
1713 * 2. Skyhawk with pre-registered SGL Lists
1714 */
1715 *value = FALSE;
1716 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1717 !sli_get_sgl_preregister(&hw->sli) &&
1718 SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
1719 *value = TRUE;
1720 }
1721
1722 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1723 sli_get_sgl_preregister(&hw->sli) &&
1724 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1725 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1726 *value = TRUE;
1727 }
1728 break;
1729 case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1730 /* Only lancer supports host allocated SGL Chaining buffers. */
1731 *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1732 (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
1733 break;
1734 case OCS_HW_SEND_FRAME_CAPABLE:
1735 if (hw->workaround.ignore_send_frame) {
1736 *value = 0;
1737 } else {
			/* Only Lancer is capable */
1739 *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1740 }
1741 break;
1742 case OCS_HW_RQ_SELECTION_POLICY:
1743 *value = hw->config.rq_selection_policy;
1744 break;
1745 case OCS_HW_RR_QUANTA:
1746 *value = hw->config.rr_quanta;
1747 break;
1748 case OCS_HW_MAX_VPORTS:
1749 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1750 break;
1751 default:
1752 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1753 rc = OCS_HW_RTN_ERROR;
1754 }
1755
1756 return rc;
1757 }
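
/*
 * Usage sketch (illustrative only, not part of the driver): ocs_hw_get()
 * writes the property value through the caller-supplied pointer and returns
 * a status, so check the return code before using the value:
 *
 *	uint32_t sli_rev = 0;
 *
 *	if (ocs_hw_get(hw, OCS_HW_SLI_REV, &sli_rev) == OCS_HW_RTN_SUCCESS) {
 *		ocs_log_debug(hw->os, "SLI revision %d\n", sli_rev);
 *	}
 */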
1758
1759 void *
1760 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1761 {
1762 void *rc = NULL;
1763
1764 switch (prop) {
1765 case OCS_HW_WWN_NODE:
1766 rc = sli_get_wwn_node(&hw->sli);
1767 break;
1768 case OCS_HW_WWN_PORT:
1769 rc = sli_get_wwn_port(&hw->sli);
1770 break;
1771 case OCS_HW_VPD:
1772 /* make sure VPD length is non-zero */
1773 if (sli_get_vpd_len(&hw->sli)) {
1774 rc = sli_get_vpd(&hw->sli);
1775 }
1776 break;
1777 case OCS_HW_FW_REV:
1778 rc = sli_get_fw_name(&hw->sli, 0);
1779 break;
1780 case OCS_HW_FW_REV2:
1781 rc = sli_get_fw_name(&hw->sli, 1);
1782 break;
1783 case OCS_HW_IPL:
1784 rc = sli_get_ipl_name(&hw->sli);
1785 break;
1786 case OCS_HW_PORTNUM:
1787 rc = sli_get_portnum(&hw->sli);
1788 break;
1789 case OCS_HW_BIOS_VERSION_STRING:
1790 rc = sli_get_bios_version_string(&hw->sli);
1791 break;
1792 default:
1793 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1794 }
1795
1796 return rc;
1797 }
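
/*
 * Usage sketch (illustrative only): pointer-valued properties return NULL
 * when the underlying data is unavailable, so the result must be checked:
 *
 *	uint8_t *wwpn = ocs_hw_get_ptr(hw, OCS_HW_WWN_PORT);
 *
 *	if (wwpn != NULL) {
 *		ocs_log_debug(hw->os, "WWPN begins %02x%02x\n", wwpn[0], wwpn[1]);
 *	}
 */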
1798
1799 ocs_hw_rtn_e
1800 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1801 {
1802 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1803
1804 switch (prop) {
1805 case OCS_HW_N_IO:
1806 if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1807 value == 0) {
1808 ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1809 value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1810 rc = OCS_HW_RTN_ERROR;
1811 } else {
1812 hw->config.n_io = value;
1813 }
1814 break;
1815 case OCS_HW_N_SGL:
1816 value += SLI4_SGE_MAX_RESERVED;
1817 if (value > sli_get_max_sgl(&hw->sli)) {
1818 ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1819 value, sli_get_max_sgl(&hw->sli));
1820 rc = OCS_HW_RTN_ERROR;
1821 } else {
1822 hw->config.n_sgl = value;
1823 }
1824 break;
1825 case OCS_HW_TOPOLOGY:
1826 if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1827 (value != OCS_HW_TOPOLOGY_AUTO)) {
1828 ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1829 value, sli_get_medium(&hw->sli));
1830 rc = OCS_HW_RTN_ERROR;
1831 break;
1832 }
1833
1834 switch (value) {
1835 case OCS_HW_TOPOLOGY_AUTO:
1836 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1837 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1838 } else {
1839 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1840 }
1841 break;
1842 case OCS_HW_TOPOLOGY_NPORT:
1843 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1844 break;
1845 case OCS_HW_TOPOLOGY_LOOP:
1846 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1847 break;
1848 default:
1849 ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1850 rc = OCS_HW_RTN_ERROR;
1851 }
1852 hw->config.topology = value;
1853 break;
1854 case OCS_HW_LINK_SPEED:
1855 if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1856 switch (value) {
1857 case 0: /* Auto-speed negotiation */
1858 case 10000: /* FCoE speed */
1859 hw->config.speed = FC_LINK_SPEED_10G;
1860 break;
1861 default:
1862 ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1863 value, sli_get_medium(&hw->sli));
1864 rc = OCS_HW_RTN_ERROR;
1865 }
1866 break;
1867 }
1868
1869 switch (value) {
1870 case 0: /* Auto-speed negotiation */
1871 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1872 break;
1873 case 2000: /* FC speeds */
1874 hw->config.speed = FC_LINK_SPEED_2G;
1875 break;
1876 case 4000:
1877 hw->config.speed = FC_LINK_SPEED_4G;
1878 break;
1879 case 8000:
1880 hw->config.speed = FC_LINK_SPEED_8G;
1881 break;
1882 case 16000:
1883 hw->config.speed = FC_LINK_SPEED_16G;
1884 break;
1885 case 32000:
1886 hw->config.speed = FC_LINK_SPEED_32G;
1887 break;
1888 default:
1889 ocs_log_test(hw->os, "unsupported speed %d\n", value);
1890 rc = OCS_HW_RTN_ERROR;
1891 }
1892 break;
1893 case OCS_HW_DIF_SEED:
		/* Set the DIF seed - only for Lancer right now */
1895 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1896 ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1897 rc = OCS_HW_RTN_ERROR;
1898 } else {
1899 hw->config.dif_seed = value;
1900 }
1901 break;
1902 case OCS_HW_DIF_MODE:
1903 switch (value) {
1904 case OCS_HW_DIF_MODE_INLINE:
1905 /*
1906 * Make sure we support inline DIF.
1907 *
1908 * Note: Having both bits clear means that we have old
1909 * FW that doesn't set the bits.
1910 */
1911 if (sli_is_dif_inline_capable(&hw->sli)) {
1912 hw->config.dif_mode = value;
1913 } else {
1914 ocs_log_test(hw->os, "chip does not support DIF inline\n");
1915 rc = OCS_HW_RTN_ERROR;
1916 }
1917 break;
1918 case OCS_HW_DIF_MODE_SEPARATE:
1919 /* Make sure we support DIF separates. */
1920 if (sli_is_dif_separate_capable(&hw->sli)) {
1921 hw->config.dif_mode = value;
1922 } else {
1923 ocs_log_test(hw->os, "chip does not support DIF separate\n");
1924 rc = OCS_HW_RTN_ERROR;
1925 }
1926 }
1927 break;
1928 case OCS_HW_RQ_PROCESS_LIMIT: {
1929 hw_rq_t *rq;
1930 uint32_t i;
1931
1932 /* For each hw_rq object, set its parent CQ limit value */
1933 for (i = 0; i < hw->hw_rq_count; i++) {
1934 rq = hw->hw_rq[i];
1935 hw->cq[rq->cq->instance].proc_limit = value;
1936 }
1937 break;
1938 }
1939 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1940 hw->config.rq_default_buffer_size = value;
1941 break;
1942 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1943 hw->config.auto_xfer_rdy_xri_cnt = value;
1944 break;
1945 case OCS_HW_AUTO_XFER_RDY_SIZE:
1946 hw->config.auto_xfer_rdy_size = value;
1947 break;
1948 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1949 switch (value) {
1950 case 512:
1951 hw->config.auto_xfer_rdy_blk_size_chip = 0;
1952 break;
1953 case 1024:
1954 hw->config.auto_xfer_rdy_blk_size_chip = 1;
1955 break;
1956 case 2048:
1957 hw->config.auto_xfer_rdy_blk_size_chip = 2;
1958 break;
1959 case 4096:
1960 hw->config.auto_xfer_rdy_blk_size_chip = 3;
1961 break;
1962 case 520:
1963 hw->config.auto_xfer_rdy_blk_size_chip = 4;
1964 break;
1965 default:
1966 ocs_log_err(hw->os, "Invalid block size %d\n",
1967 value);
1968 rc = OCS_HW_RTN_ERROR;
1969 }
1970 break;
1971 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1972 hw->config.auto_xfer_rdy_t10_enable = value;
1973 break;
1974 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1975 hw->config.auto_xfer_rdy_p_type = value;
1976 break;
1977 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1978 hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1979 break;
1980 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1981 hw->config.auto_xfer_rdy_app_tag_valid = value;
1982 break;
1983 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1984 hw->config.auto_xfer_rdy_app_tag_value = value;
1985 break;
1986 case OCS_ESOC:
1987 hw->config.esoc = value;
1988 break;
1989 case OCS_HW_HIGH_LOGIN_MODE:
1990 rc = sli_set_hlm(&hw->sli, value);
1991 break;
1992 case OCS_HW_PREREGISTER_SGL:
1993 rc = sli_set_sgl_preregister(&hw->sli, value);
1994 break;
1995 case OCS_HW_ETH_LICENSE:
1996 hw->eth_license = value;
1997 break;
1998 case OCS_HW_EMULATE_I_ONLY_AAB:
1999 hw->config.i_only_aab = value;
2000 break;
2001 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
2002 hw->config.emulate_tgt_wqe_timeout = value;
2003 break;
2004 case OCS_HW_BOUNCE:
2005 hw->config.bounce = value;
2006 break;
2007 case OCS_HW_RQ_SELECTION_POLICY:
2008 hw->config.rq_selection_policy = value;
2009 break;
2010 case OCS_HW_RR_QUANTA:
2011 hw->config.rr_quanta = value;
2012 break;
2013 default:
2014 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2015 rc = OCS_HW_RTN_ERROR;
2016 }
2017
2018 return rc;
2019 }
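
/*
 * Usage sketch (illustrative only): properties are typically configured
 * before ocs_hw_init(); each call validates the value and returns a status:
 *
 *	if (ocs_hw_set(hw, OCS_HW_TOPOLOGY, OCS_HW_TOPOLOGY_AUTO) != OCS_HW_RTN_SUCCESS ||
 *	    ocs_hw_set(hw, OCS_HW_LINK_SPEED, 8000) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(hw->os, "HW property configuration failed\n");
 *	}
 */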
2020
2021 ocs_hw_rtn_e
2022 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2023 {
2024 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2025
2026 switch (prop) {
2027 case OCS_HW_WAR_VERSION:
2028 hw->hw_war_version = value;
2029 break;
2030 case OCS_HW_FILTER_DEF: {
2031 char *p = value;
2032 uint32_t idx = 0;
2033
2034 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2035 hw->config.filter_def[idx] = 0;
2036 }
2037
2038 for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2039 hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2040 p = ocs_strchr(p, ',');
2041 if (p != NULL) {
2042 p++;
2043 }
2044 }
2045
2046 break;
2047 }
2048 default:
2049 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2050 rc = OCS_HW_RTN_ERROR;
2051 break;
2052 }
2053 return rc;
2054 }
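
/*
 * Usage sketch (illustrative only): OCS_HW_FILTER_DEF takes a comma-separated
 * string of numeric filter definitions, parsed with ocs_strtoul() into
 * hw->config.filter_def[]:
 *
 *	ocs_hw_set_ptr(hw, OCS_HW_FILTER_DEF, "0x01,0x02,0x03,0x04");
 */
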
2055 /**
2056 * @ingroup interrupt
2057 * @brief Check for the events associated with the interrupt vector.
2058 *
2059 * @param hw Hardware context.
2060 * @param vector Zero-based interrupt vector number.
2061 *
 * @return Returns 0 if the EQ has entries to process, or a non-zero value
 * if the EQ is empty (in which case it is re-armed) or the vector is invalid.
2063 */
2064 int32_t
2065 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2066 {
2067 int32_t rc = 0;
2068
2069 if (!hw) {
2070 ocs_log_err(NULL, "HW context NULL?!?\n");
2071 return -1;
2072 }
2073
	if (vector >= hw->eq_count) {
2075 ocs_log_err(hw->os, "vector %d. max %d\n",
2076 vector, hw->eq_count);
2077 return -1;
2078 }
2079
2080 /*
2081 * The caller should disable interrupts if they wish to prevent us
2082 * from processing during a shutdown. The following states are defined:
2083 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2084 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2085 * queues are cleared.
2086 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2087 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2088 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2089 * completions.
2090 */
2091 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2092 rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2093
2094 /* Re-arm queue if there are no entries */
2095 if (rc != 0) {
2096 sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2097 }
2098 }
2099 return rc;
2100 }
2101
2102 void
2103 ocs_hw_unsol_process_bounce(void *arg)
2104 {
2105 ocs_hw_sequence_t *seq = arg;
2106 ocs_hw_t *hw = seq->hw;
2107
2108 ocs_hw_assert(hw != NULL);
2109 ocs_hw_assert(hw->callback.unsolicited != NULL);
2110
2111 hw->callback.unsolicited(hw->args.unsolicited, seq);
2112 }
2113
2114 int32_t
2115 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2116 {
2117 hw_eq_t *eq;
2118 int32_t rc = 0;
2119
2120 CPUTRACE("");
2121
2122 /*
2123 * The caller should disable interrupts if they wish to prevent us
2124 * from processing during a shutdown. The following states are defined:
2125 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2126 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2127 * queues are cleared.
2128 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2129 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2130 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2131 * completions.
2132 */
2133 if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2134 return 0;
2135 }
2136
2137 /* Get pointer to hw_eq_t */
2138 eq = hw->hw_eq[vector];
2139
2140 OCS_STAT(eq->use_count++);
2141
2142 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2143
2144 return rc;
2145 }
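
/*
 * Interrupt-path usage sketch (illustrative only; the handler name and the
 * 1000 msec time budget are hypothetical): the OS interrupt handler passes
 * its vector to ocs_hw_process(), which processes EQEs until the queue is
 * drained or the budget expires:
 *
 *	static void
 *	example_isr(void *arg, uint32_t vector)
 *	{
 *		ocs_hw_t *hw = arg;
 *
 *		ocs_hw_process(hw, vector, 1000);
 *	}
 */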
2146
2147 /**
2148 * @ingroup interrupt
2149 * @brief Process events associated with an EQ.
2150 *
2151 * @par Description
2152 * Loop termination:
 * @n @n Without a mechanism to terminate the completion processing loop, it
 * is possible under some workload conditions for the loop to never terminate
 * (or at least to run longer than the OS permits an interrupt handler or
 * kernel thread context to hold a CPU without yielding).
 * @n @n The approach taken here is to periodically check how long we have been
 * in this processing loop, and if we exceed a predetermined time (multiple
 * seconds), the loop is terminated, and ocs_hw_process() returns.
2161 *
2162 * @param hw Hardware context.
2163 * @param eq Pointer to HW EQ object.
2164 * @param max_isr_time_msec Maximum time in msec to stay in this function.
2165 *
2166 * @return Returns 0 on success, or a non-zero value on failure.
2167 */
2168 int32_t
2169 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2170 {
2171 uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
2172 uint32_t done = FALSE;
2173 uint32_t tcheck_count;
2174 time_t tstart;
2175 time_t telapsed;
2176
2177 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2178 tstart = ocs_msectime();
2179
2180 CPUTRACE("");
2181
2182 while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2183 uint16_t cq_id = 0;
2184 int32_t rc;
2185
2186 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2187 if (unlikely(rc)) {
2188 if (rc > 0) {
2189 uint32_t i;
2190
2191 /*
2192 * Received a sentinel EQE indicating the EQ is full.
2193 * Process all CQs
2194 */
2195 for (i = 0; i < hw->cq_count; i++) {
2196 ocs_hw_cq_process(hw, hw->hw_cq[i]);
2197 }
2198 continue;
2199 } else {
2200 return rc;
2201 }
2202 } else {
2203 int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2204 if (likely(index >= 0)) {
2205 ocs_hw_cq_process(hw, hw->hw_cq[index]);
2206 } else {
2207 ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2208 }
2209 }
2210
2211 if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2212 sli_queue_arm(&hw->sli, eq->queue, FALSE);
2213 }
2214
2215 if (tcheck_count && (--tcheck_count == 0)) {
2216 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2217 telapsed = ocs_msectime() - tstart;
2218 if (telapsed >= max_isr_time_msec) {
2219 done = TRUE;
2220 }
2221 }
2222 }
2223 sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2224
2225 return 0;
2226 }
2227
2228 /**
2229 * @brief Submit queued (pending) mbx commands.
2230 *
2231 * @par Description
2232 * Submit queued mailbox commands.
2233 * --- Assumes that hw->cmd_lock is held ---
2234 *
2235 * @param hw Hardware context.
2236 *
2237 * @return Returns 0 on success, or a negative error code value on failure.
2238 */
2239 static int32_t
2240 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2241 {
2242 ocs_command_ctx_t *ctx;
2243 int32_t rc = 0;
2244
2245 /* Assumes lock held */
2246
2247 /* Only submit MQE if there's room */
2248 while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2249 ctx = ocs_list_remove_head(&hw->cmd_pending);
2250 if (ctx == NULL) {
2251 break;
2252 }
2253 ocs_list_add_tail(&hw->cmd_head, ctx);
2254 hw->cmd_head_count++;
		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
			rc = -1;
			ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
			break;
2259 }
2260 }
2261 return rc;
2262 }
2263
2264 /**
2265 * @ingroup io
2266 * @brief Issue a SLI command.
2267 *
2268 * @par Description
2269 * Send a mailbox command to the hardware, and either wait for a completion
2270 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2271 *
2272 * @param hw Hardware context.
2273 * @param cmd Buffer containing a formatted command and results.
2274 * @param opts Command options:
2275 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2276 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2277 * @param cb Function callback used for asynchronous mode. May be NULL.
2278 * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
 * @n @n @b Note: If the
 * callback function pointer is NULL, the results of the command are silently
 * discarded, allowing the command buffer to be declared on the caller's stack.
2282 * @param arg Argument passed to an asynchronous callback.
2283 *
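 * @par Example
 * A minimal synchronous (OCS_CMD_POLL) sketch; it assumes @c buf has already
 * been formatted by an appropriate sli_cmd_*() helper:
 * @code
 * uint8_t buf[SLI4_BMBX_SIZE];
 *
 * ... format a mailbox command into buf ...
 * if (ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "mailbox command failed\n");
 * }
 * @endcode
 *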
2284 * @return Returns 0 on success, or a non-zero value on failure.
2285 */
2286 ocs_hw_rtn_e
2287 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2288 {
2289 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2290
2291 /*
2292 * If the chip is in an error state (UE'd) then reject this mailbox
2293 * command.
2294 */
2295 if (sli_fw_error_status(&hw->sli) > 0) {
2296 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2297 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2298 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2299 hw->expiration_logged = 1;
			ocs_log_crit(hw->os, "Emulex: Heartbeat expired after %d seconds\n",
					hw->watchdog_timeout);
2302 }
2303 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2304 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2305 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2306 err1, err2);
2307
2308 return OCS_HW_RTN_ERROR;
2309 }
2310
2311 if (OCS_CMD_POLL == opts) {
2312 ocs_lock(&hw->cmd_lock);
2313 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2314 /*
2315 * Can't issue Boot-strap mailbox command with other
2316 * mail-queue commands pending as this interaction is
2317 * undefined
2318 */
2319 rc = OCS_HW_RTN_ERROR;
2320 } else {
2321 void *bmbx = hw->sli.bmbx.virt;
2322
2323 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2324 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2325
2326 if (sli_bmbx_command(&hw->sli) == 0) {
2327 rc = OCS_HW_RTN_SUCCESS;
2328 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2329 }
2330 }
2331 ocs_unlock(&hw->cmd_lock);
2332 } else if (OCS_CMD_NOWAIT == opts) {
2333 ocs_command_ctx_t *ctx = NULL;
2334
2335 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2336 if (!ctx) {
2337 ocs_log_err(hw->os, "can't allocate command context\n");
2338 return OCS_HW_RTN_NO_RESOURCES;
2339 }
2340
2341 if (hw->state != OCS_HW_STATE_ACTIVE) {
2342 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2343 ocs_free(hw->os, ctx, sizeof(*ctx));
2344 return OCS_HW_RTN_ERROR;
2345 }
2346
2347 if (cb) {
2348 ctx->cb = cb;
2349 ctx->arg = arg;
2350 }
2351 ctx->buf = cmd;
2352 ctx->ctx = hw;
2353
2354 ocs_lock(&hw->cmd_lock);
2355
2356 /* Add to pending list */
2357 ocs_list_add_tail(&hw->cmd_pending, ctx);
2358
2359 /* Submit as much of the pending list as we can */
2360 if (ocs_hw_cmd_submit_pending(hw) == 0) {
2361 rc = OCS_HW_RTN_SUCCESS;
2362 }
2363
2364 ocs_unlock(&hw->cmd_lock);
2365 }
2366
2367 return rc;
2368 }
2369
2370 /**
2371 * @ingroup devInitShutdown
2372 * @brief Register a callback for the given event.
2373 *
2374 * @param hw Hardware context.
2375 * @param which Event of interest.
2376 * @param func Function to call when the event occurs.
2377 * @param arg Argument passed to the callback function.
2378 *
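 * @par Example
 * Registration sketch; @c example_domain_cb and @c example_arg are
 * hypothetical caller-provided names:
 * @code
 * ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, example_domain_cb, example_arg);
 * @endcode
 *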
2379 * @return Returns 0 on success, or a non-zero value on failure.
2380 */
2381 ocs_hw_rtn_e
2382 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2383 {
2384
2385 if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2386 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2387 hw, which, func);
2388 return OCS_HW_RTN_ERROR;
2389 }
2390
2391 switch (which) {
2392 case OCS_HW_CB_DOMAIN:
2393 hw->callback.domain = func;
2394 hw->args.domain = arg;
2395 break;
2396 case OCS_HW_CB_PORT:
2397 hw->callback.port = func;
2398 hw->args.port = arg;
2399 break;
2400 case OCS_HW_CB_UNSOLICITED:
2401 hw->callback.unsolicited = func;
2402 hw->args.unsolicited = arg;
2403 break;
2404 case OCS_HW_CB_REMOTE_NODE:
2405 hw->callback.rnode = func;
2406 hw->args.rnode = arg;
2407 break;
2408 case OCS_HW_CB_BOUNCE:
2409 hw->callback.bounce = func;
2410 hw->args.bounce = arg;
2411 break;
2412 default:
2413 ocs_log_test(hw->os, "unknown callback %#x\n", which);
2414 return OCS_HW_RTN_ERROR;
2415 }
2416
2417 return OCS_HW_RTN_SUCCESS;
2418 }
2419
2420 /**
2421 * @ingroup port
2422 * @brief Allocate a port object.
2423 *
2424 * @par Description
2425 * This function allocates a VPI object for the port and stores it in the
2426 * indicator field of the port object.
2427 *
2428 * @param hw Hardware context.
2429 * @param sport SLI port object used to connect to the domain.
2430 * @param domain Domain object associated with this port (may be NULL).
2431 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2432 *
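 * @par Example
 * Allocation sketch for a port using the default WWPN (error handling
 * abbreviated):
 * @code
 * if (ocs_hw_port_alloc(hw, sport, domain, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "port allocation failed\n");
 * }
 * @endcode
 *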
2433 * @return Returns 0 on success, or a non-zero value on failure.
2434 */
2435 ocs_hw_rtn_e
2436 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2437 uint8_t *wwpn)
2438 {
2439 uint8_t *cmd = NULL;
2440 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2441 uint32_t index;
2442
2443 sport->indicator = UINT32_MAX;
2444 sport->hw = hw;
2445 sport->ctx.app = sport;
2446 sport->sm_free_req_pending = 0;
2447
2448 /*
2449 * Check if the chip is in an error state (UE'd) before proceeding.
2450 */
2451 if (sli_fw_error_status(&hw->sli) > 0) {
2452 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2453 return OCS_HW_RTN_ERROR;
2454 }
2455
2456 if (wwpn) {
2457 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2458 }
2459
2460 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2461 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2462 return OCS_HW_RTN_ERROR;
2463 }
2464
2465 if (domain != NULL) {
2466 ocs_sm_function_t next = NULL;
2467
2468 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2469 if (!cmd) {
2470 ocs_log_err(hw->os, "command memory allocation failed\n");
2471 rc = OCS_HW_RTN_NO_MEMORY;
2472 goto ocs_hw_port_alloc_out;
2473 }
2474
2475 /* If the WWPN is NULL, fetch the default WWPN and WWNN before
2476 * initializing the VPI
2477 */
2478 if (!wwpn) {
2479 next = __ocs_hw_port_alloc_read_sparm64;
2480 } else {
2481 next = __ocs_hw_port_alloc_init_vpi;
2482 }
2483
2484 ocs_sm_transition(&sport->ctx, next, cmd);
2485 } else if (!wwpn) {
2486 /* This is the convention for the HW, not SLI */
2487 ocs_log_test(hw->os, "need WWN for physical port\n");
2488 rc = OCS_HW_RTN_ERROR;
2489 } else {
2490 /* domain NULL and wwpn non-NULL */
2491 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2492 }
2493
2494 ocs_hw_port_alloc_out:
2495 if (rc != OCS_HW_RTN_SUCCESS) {
2496 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2497
2498 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2499 }
2500
2501 return rc;
2502 }
2503
2504 /**
2505 * @ingroup port
2506 * @brief Attach a physical/virtual SLI port to a domain.
2507 *
2508 * @par Description
2509 * This function registers a previously-allocated VPI with the
2510 * device.
2511 *
2512 * @param hw Hardware context.
2513 * @param sport Pointer to the SLI port object.
2514 * @param fc_id Fibre Channel ID to associate with this port.
2515 *
2516 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2517 */
2518 ocs_hw_rtn_e
2519 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2520 {
2521 uint8_t *buf = NULL;
2522 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2523
2524 if (!hw || !sport) {
2525 ocs_log_err(hw ? hw->os : NULL,
2526 "bad parameter(s) hw=%p sport=%p\n", hw,
2527 sport);
2528 return OCS_HW_RTN_ERROR;
2529 }
2530
2531 /*
2532 * Check if the chip is in an error state (UE'd) before proceeding.
2533 */
2534 if (sli_fw_error_status(&hw->sli) > 0) {
2535 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2536 return OCS_HW_RTN_ERROR;
2537 }
2538
2539 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2540 if (!buf) {
2541 ocs_log_err(hw->os, "no buffer for command\n");
2542 return OCS_HW_RTN_NO_MEMORY;
2543 }
2544
2545 sport->fc_id = fc_id;
2546 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2547 return rc;
2548 }
2549
2550 /**
2551 * @brief Called when the port control command completes.
2552 *
2553 * @par Description
2554 * We only need to free the mailbox command buffer.
2555 *
2556 * @param hw Hardware context.
2557 * @param status Status field from the mbox completion.
2558 * @param mqe Mailbox response structure.
2559 * @param arg Pointer to a callback function that signals the caller that the command is done.
2560 *
2561 * @return Returns 0.
2562 */
2563 static int32_t
2564 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2565 {
2566 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2567 return 0;
2568 }
2569
2570 /**
2571 * @ingroup port
2572 * @brief Control a port (initialize, shutdown, or set link configuration).
2573 *
2574 * @par Description
2575 * This function controls a port depending on the @c ctrl parameter:
2576 * - @b OCS_HW_PORT_INIT -
2577 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2578 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2579 * .
2580 * - @b OCS_HW_PORT_SHUTDOWN -
2581 * Issues the DOWN_LINK command for the specified port.
2582 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2583 * .
2584 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2585 * Sets the link configuration.
2586 *
2587 * @param hw Hardware context.
2588 * @param ctrl Specifies the operation:
2589 * - OCS_HW_PORT_INIT
2590 * - OCS_HW_PORT_SHUTDOWN
2591 * - OCS_HW_PORT_SET_LINK_CONFIG
2592 *
2593 * @param value Operation-specific value.
2594 * - OCS_HW_PORT_INIT - Selective reset AL_PA
2595 * - OCS_HW_PORT_SHUTDOWN - N/A
2596 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2597 *
 * @param cb Callback function invoked when the operation completes:
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - invoked after the linkcfg mailbox command
 * completes.
 *
 * @param arg Argument passed to the callback when the command completes:
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - passed to the callback invoked after the
 * linkcfg mailbox command completes.
2609 *
2610 * @return Returns 0 on success, or a non-zero value on failure.
2611 */
2612 ocs_hw_rtn_e
2613 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2614 {
2615 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2616
2617 switch (ctrl) {
2618 case OCS_HW_PORT_INIT:
2619 {
2620 uint8_t *init_link;
2621 uint32_t speed = 0;
2622 uint8_t reset_alpa = 0;
2623
2624 if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2625 uint8_t *cfg_link;
2626
2627 cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2628 if (cfg_link == NULL) {
2629 ocs_log_err(hw->os, "no buffer for command\n");
2630 return OCS_HW_RTN_NO_MEMORY;
2631 }
2632
2633 if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2634 rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2635 ocs_hw_cb_port_control, NULL);
2636 }
2637
2638 if (rc != OCS_HW_RTN_SUCCESS) {
2639 ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2640 ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2641 break;
2642 }
2643 speed = hw->config.speed;
2644 reset_alpa = (uint8_t)(value & 0xff);
2645 } else {
2646 speed = FC_LINK_SPEED_10G;
2647 }
2648
2649 /*
		 * Bring the link up, unless the FW version is unsupported
2651 */
2652 if (hw->workaround.fw_version_too_low) {
2653 if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2654 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2655 OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2656 } else {
2657 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2658 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2659 }
2660
2661 return OCS_HW_RTN_ERROR;
2662 }
2663
2664 rc = OCS_HW_RTN_ERROR;
2665
2666 /* Allocate a new buffer for the init_link command */
2667 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2668 if (init_link == NULL) {
2669 ocs_log_err(hw->os, "no buffer for command\n");
2670 return OCS_HW_RTN_NO_MEMORY;
2671 }
2672
2673 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2674 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2675 ocs_hw_cb_port_control, NULL);
2676 }
2677 /* Free buffer on error, since no callback is coming */
2678 if (rc != OCS_HW_RTN_SUCCESS) {
2679 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2680 ocs_log_err(hw->os, "INIT_LINK failed\n");
2681 }
2682 break;
2683 }
2684 case OCS_HW_PORT_SHUTDOWN:
2685 {
2686 uint8_t *down_link;
2687
2688 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2689 if (down_link == NULL) {
2690 ocs_log_err(hw->os, "no buffer for command\n");
2691 return OCS_HW_RTN_NO_MEMORY;
2692 }
2693 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2694 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2695 ocs_hw_cb_port_control, NULL);
2696 }
2697 /* Free buffer on error, since no callback is coming */
2698 if (rc != OCS_HW_RTN_SUCCESS) {
2699 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2700 ocs_log_err(hw->os, "DOWN_LINK failed\n");
2701 }
2702 break;
2703 }
2704 case OCS_HW_PORT_SET_LINK_CONFIG:
2705 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2706 break;
2707 default:
2708 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2709 break;
2710 }
2711
2712 return rc;
2713 }
2714
2715 /**
2716 * @ingroup port
2717 * @brief Free port resources.
2718 *
2719 * @par Description
2720 * Issue the UNREG_VPI command to free the assigned VPI context.
2721 *
2722 * @param hw Hardware context.
2723 * @param sport SLI port object used to connect to the domain.
2724 *
2725 * @return Returns 0 on success, or a non-zero value on failure.
2726 */
2727 ocs_hw_rtn_e
2728 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2729 {
2730 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2731
2732 if (!hw || !sport) {
2733 ocs_log_err(hw ? hw->os : NULL,
2734 "bad parameter(s) hw=%p sport=%p\n", hw,
2735 sport);
2736 return OCS_HW_RTN_ERROR;
2737 }
2738
2739 /*
2740 * Check if the chip is in an error state (UE'd) before proceeding.
2741 */
2742 if (sli_fw_error_status(&hw->sli) > 0) {
2743 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2744 return OCS_HW_RTN_ERROR;
2745 }
2746
2747 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2748 return rc;
2749 }
2750
2751 /**
2752 * @ingroup domain
2753 * @brief Allocate a fabric domain object.
2754 *
2755 * @par Description
2756 * This function starts a series of commands needed to connect to the domain, including
2757 * - REG_FCFI
2758 * - INIT_VFI
2759 * - READ_SPARMS
2760 * .
2761 * @b Note: Not all SLI interface types use all of the above commands.
 * @n @n Upon successful allocation, the HW generates an OCS_HW_DOMAIN_ALLOC_OK
 * event. On failure, it generates an OCS_HW_DOMAIN_ALLOC_FAIL event.
2764 *
2765 * @param hw Hardware context.
2766 * @param domain Pointer to the domain object.
2767 * @param fcf FCF index.
2768 * @param vlan VLAN ID.
2769 *
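 * @par Example
 * Allocation sketch; @c fcf_index and @c vlan_id stand in for values reported
 * by fabric discovery:
 * @code
 * if (ocs_hw_domain_alloc(hw, domain, fcf_index, vlan_id) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "domain allocation failed\n");
 * }
 * @endcode
 *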
2770 * @return Returns 0 on success, or a non-zero value on failure.
2771 */
2772 ocs_hw_rtn_e
2773 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2774 {
2775 uint8_t *cmd = NULL;
2776 uint32_t index;
2777
2778 if (!hw || !domain || !domain->sport) {
2779 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2780 hw, domain, domain ? domain->sport : NULL);
2781 return OCS_HW_RTN_ERROR;
2782 }
2783
2784 /*
2785 * Check if the chip is in an error state (UE'd) before proceeding.
2786 */
2787 if (sli_fw_error_status(&hw->sli) > 0) {
2788 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2789 return OCS_HW_RTN_ERROR;
2790 }
2791
2792 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2793 if (!cmd) {
2794 ocs_log_err(hw->os, "command memory allocation failed\n");
2795 return OCS_HW_RTN_NO_MEMORY;
2796 }
2797
2798 domain->dma = hw->domain_dmem;
2799
2800 domain->hw = hw;
2801 domain->sm.app = domain;
2802 domain->fcf = fcf;
2803 domain->fcf_indicator = UINT32_MAX;
2804 domain->vlan_id = vlan;
2805 domain->indicator = UINT32_MAX;
2806
2807 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2808 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2809
2810 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2811
2812 return OCS_HW_RTN_ERROR;
2813 }
2814
2815 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2816 return OCS_HW_RTN_SUCCESS;
2817 }
2818
2819 /**
2820 * @ingroup domain
2821 * @brief Attach a SLI port to a domain.
2822 *
2823 * @param hw Hardware context.
2824 * @param domain Pointer to the domain object.
2825 * @param fc_id Fibre Channel ID to associate with this port.
2826 *
2827 * @return Returns 0 on success, or a non-zero value on failure.
2828 */
2829 ocs_hw_rtn_e
2830 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2831 {
2832 uint8_t *buf = NULL;
2833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2834
2835 if (!hw || !domain) {
2836 ocs_log_err(hw ? hw->os : NULL,
2837 "bad parameter(s) hw=%p domain=%p\n",
2838 hw, domain);
2839 return OCS_HW_RTN_ERROR;
2840 }
2841
2842 /*
2843 * Check if the chip is in an error state (UE'd) before proceeding.
2844 */
2845 if (sli_fw_error_status(&hw->sli) > 0) {
2846 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2847 return OCS_HW_RTN_ERROR;
2848 }
2849
2850 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2851 if (!buf) {
2852 ocs_log_err(hw->os, "no buffer for command\n");
2853 return OCS_HW_RTN_NO_MEMORY;
2854 }
2855
2856 domain->sport->fc_id = fc_id;
2857 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2858 return rc;
2859 }
2860
2861 /**
2862 * @ingroup domain
2863 * @brief Free a fabric domain object.
2864 *
2865 * @par Description
2866 * Free both the driver and SLI port resources associated with the domain.
2867 *
2868 * @param hw Hardware context.
2869 * @param domain Pointer to the domain object.
2870 *
2871 * @return Returns 0 on success, or a non-zero value on failure.
2872 */
2873 ocs_hw_rtn_e
2874 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2875 {
2876 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2877
2878 if (!hw || !domain) {
2879 ocs_log_err(hw ? hw->os : NULL,
2880 "bad parameter(s) hw=%p domain=%p\n",
2881 hw, domain);
2882 return OCS_HW_RTN_ERROR;
2883 }
2884
2885 /*
2886 * Check if the chip is in an error state (UE'd) before proceeding.
2887 */
2888 if (sli_fw_error_status(&hw->sli) > 0) {
2889 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2890 return OCS_HW_RTN_ERROR;
2891 }
2892
2893 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2894 return rc;
2895 }
2896
2897 /**
2898 * @ingroup domain
2899 * @brief Free a fabric domain object.
2900 *
2901 * @par Description
2902 * Free the driver resources associated with the domain. The difference between
2903 * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2904 * exist on the SLI port, due to a reset or after some error conditions.
2905 *
2906 * @param hw Hardware context.
2907 * @param domain Pointer to the domain object.
2908 *
2909 * @return Returns 0 on success, or a non-zero value on failure.
2910 */
2911 ocs_hw_rtn_e
2912 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2913 {
2914 if (!hw || !domain) {
2915 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2916 return OCS_HW_RTN_ERROR;
2917 }
2918
2919 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2920
2921 return OCS_HW_RTN_SUCCESS;
2922 }
2923
2924 /**
2925 * @ingroup node
2926 * @brief Allocate a remote node object.
2927 *
2928 * @param hw Hardware context.
2929 * @param rnode Allocated remote node object to initialize.
2930 * @param fc_addr FC address of the remote node.
2931 * @param sport SLI port used to connect to remote node.
2932 *
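 * @par Example
 * Typical sequence sketch: allocate the RPI, then register it with the remote
 * port's service parameters (@c sparms_dma is a hypothetical DMA buffer):
 * @code
 * if (ocs_hw_node_alloc(hw, rnode, fc_addr, sport) == OCS_HW_RTN_SUCCESS) {
 *         rc = ocs_hw_node_attach(hw, rnode, &sparms_dma);
 * }
 * @endcode
 *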
2933 * @return Returns 0 on success, or a non-zero value on failure.
2934 */
2935 ocs_hw_rtn_e
2936 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2937 ocs_sli_port_t *sport)
2938 {
2939 /* Check for invalid indicator */
2940 if (UINT32_MAX != rnode->indicator) {
2941 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2942 fc_addr, rnode->indicator);
2943 return OCS_HW_RTN_ERROR;
2944 }
2945
2946 /*
2947 * Check if the chip is in an error state (UE'd) before proceeding.
2948 */
2949 if (sli_fw_error_status(&hw->sli) > 0) {
2950 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2951 return OCS_HW_RTN_ERROR;
2952 }
2953
2954 /* NULL SLI port indicates an unallocated remote node */
2955 rnode->sport = NULL;
2956
2957 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2958 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2959 fc_addr);
2960 return OCS_HW_RTN_ERROR;
2961 }
2962
2963 rnode->fc_id = fc_addr;
2964 rnode->sport = sport;
2965
2966 return OCS_HW_RTN_SUCCESS;
2967 }
2968
2969 /**
2970 * @ingroup node
2971 * @brief Update a remote node object with the remote port's service parameters.
2972 *
2973 * @param hw Hardware context.
2974 * @param rnode Allocated remote node object to initialize.
2975 * @param sparms DMA buffer containing the remote port's service parameters.
2976 *
2977 * @return Returns 0 on success, or a non-zero value on failure.
2978 */
2979 ocs_hw_rtn_e
2980 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2981 {
2982 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2983 uint8_t *buf = NULL;
2984 uint32_t count = 0;
2985
2986 if (!hw || !rnode || !sparms) {
2987 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
2988 hw, rnode, sparms);
2989 return OCS_HW_RTN_ERROR;
2990 }
2991
2992 /*
2993 * Check if the chip is in an error state (UE'd) before proceeding.
2994 */
2995 if (sli_fw_error_status(&hw->sli) > 0) {
2996 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2997 return OCS_HW_RTN_ERROR;
2998 }
2999
3000 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3001 if (!buf) {
3002 ocs_log_err(hw->os, "no buffer for command\n");
3003 return OCS_HW_RTN_NO_MEMORY;
3004 }
3005
3006 /*
3007 * If the attach count is non-zero, this RPI has already been registered.
3008 * Otherwise, register the RPI
3009 */
3010 if (rnode->index == UINT32_MAX) {
3011 ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3012 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3013 return OCS_HW_RTN_ERROR;
3014 }
3015 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3016 if (count) {
3017 /*
		 * Can't attach multiple FC_IDs to a node unless High Login
3019 * Mode is enabled
3020 */
3021 if (sli_get_hlm(&hw->sli) == FALSE) {
3022 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3023 sli_get_hlm(&hw->sli), count);
3024 rc = OCS_HW_RTN_SUCCESS;
3025 } else {
3026 rnode->node_group = TRUE;
3027 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3028 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3029 }
3030 } else {
3031 rnode->node_group = FALSE;
3032
3033 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3034 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3035 rnode->indicator, rnode->sport->indicator,
3036 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3037 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3038 ocs_hw_cb_node_attach, rnode);
3039 }
3040 }
3041
3042 if (count || rc) {
3043 if (rc < OCS_HW_RTN_SUCCESS) {
3044 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3045 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3046 }
3047 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3048 }
3049
3050 return rc;
3051 }
3052
3053 /**
3054 * @ingroup node
3055 * @brief Free a remote node resource.
3056 *
3057 * @param hw Hardware context.
3058 * @param rnode Remote node object to free.
3059 *
3060 * @return Returns 0 on success, or a non-zero value on failure.
3061 */
3062 ocs_hw_rtn_e
3063 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3064 {
3065 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3066
3067 if (!hw || !rnode) {
3068 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3069 hw, rnode);
3070 return OCS_HW_RTN_ERROR;
3071 }
3072
3073 if (rnode->sport) {
3074 if (!rnode->attached) {
3075 if (rnode->indicator != UINT32_MAX) {
3076 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3077 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3078 rnode->indicator, rnode->fc_id);
3079 rc = OCS_HW_RTN_ERROR;
3080 } else {
3081 rnode->node_group = FALSE;
3082 rnode->indicator = UINT32_MAX;
3083 rnode->index = UINT32_MAX;
3084 rnode->free_group = FALSE;
3085 }
3086 }
3087 } else {
3088 ocs_log_err(hw->os, "Error: rnode is still attached\n");
3089 rc = OCS_HW_RTN_ERROR;
3090 }
3091 }
3092
3093 return rc;
3094 }
3095
3096 /**
3097 * @ingroup node
3098 * @brief Free a remote node object.
3099 *
3100 * @param hw Hardware context.
3101 * @param rnode Remote node object to free.
3102 *
3103 * @return Returns 0 on success, or a non-zero value on failure.
3104 */
3105 ocs_hw_rtn_e
3106 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3107 {
3108 uint8_t *buf = NULL;
3109 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC;
3110 uint32_t index = UINT32_MAX;
3111
3112 if (!hw || !rnode) {
3113 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3114 hw, rnode);
3115 return OCS_HW_RTN_ERROR;
3116 }
3117
3118 /*
3119 * Check if the chip is in an error state (UE'd) before proceeding.
3120 */
3121 if (sli_fw_error_status(&hw->sli) > 0) {
3122 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3123 return OCS_HW_RTN_ERROR;
3124 }
3125
3126 index = rnode->index;
3127
3128 if (rnode->sport) {
3129 uint32_t count = 0;
3130 uint32_t fc_id;
3131
3132 if (!rnode->attached) {
3133 return OCS_HW_RTN_SUCCESS_SYNC;
3134 }
3135
3136 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3137 if (!buf) {
3138 ocs_log_err(hw->os, "no buffer for command\n");
3139 return OCS_HW_RTN_NO_MEMORY;
3140 }
3141
3142 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3143
3144 if (count <= 1) {
3145 /* There are no other references to this RPI
3146 * so unregister it and free the resource. */
3147 fc_id = UINT32_MAX;
3148 rnode->node_group = FALSE;
3149 rnode->free_group = TRUE;
3150 } else {
3151 if (sli_get_hlm(&hw->sli) == FALSE) {
3152 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3153 count);
3154 }
3155 fc_id = rnode->fc_id & 0x00ffffff;
3156 }
3157
3158 rc = OCS_HW_RTN_ERROR;
3159
3160 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3161 SLI_RSRC_FCOE_RPI, fc_id)) {
3162 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3163 }
3164
3165 if (rc != OCS_HW_RTN_SUCCESS) {
3166 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3167 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3168 rc = OCS_HW_RTN_ERROR;
3169 }
3170 }
3171
3172 return rc;
3173 }
3174
3175 /**
3176 * @ingroup node
3177 * @brief Free all remote node objects.
3178 *
3179 * @param hw Hardware context.
3180 *
3181 * @return Returns 0 on success, or a non-zero value on failure.
3182 */
3183 ocs_hw_rtn_e
3184 ocs_hw_node_free_all(ocs_hw_t *hw)
3185 {
3186 uint8_t *buf = NULL;
3187 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
3188
3189 if (!hw) {
3190 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3191 return OCS_HW_RTN_ERROR;
3192 }
3193
3194 /*
3195 * Check if the chip is in an error state (UE'd) before proceeding.
3196 */
3197 if (sli_fw_error_status(&hw->sli) > 0) {
3198 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3199 return OCS_HW_RTN_ERROR;
3200 }
3201
3202 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3203 if (!buf) {
3204 ocs_log_err(hw->os, "no buffer for command\n");
3205 return OCS_HW_RTN_NO_MEMORY;
3206 }
3207
3208 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3209 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3210 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3211 NULL);
3212 }
3213
3214 if (rc != OCS_HW_RTN_SUCCESS) {
3215 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3216 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3217 rc = OCS_HW_RTN_ERROR;
3218 }
3219
3220 return rc;
3221 }
3222
3223 ocs_hw_rtn_e
3224 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3225 {
3226
3227 if (!hw || !ngroup) {
3228 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3229 hw, ngroup);
3230 return OCS_HW_RTN_ERROR;
3231 }
3232
3233 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3234 &ngroup->index)) {
3235 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3236 ngroup->indicator);
3237 return OCS_HW_RTN_ERROR;
3238 }
3239
3240 return OCS_HW_RTN_SUCCESS;
3241 }
3242
3243 ocs_hw_rtn_e
3244 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3245 {
3246
3247 if (!hw || !ngroup || !rnode) {
3248 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3249 hw, ngroup, rnode);
3250 return OCS_HW_RTN_ERROR;
3251 }
3252
3253 if (rnode->attached) {
3254 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3255 rnode->indicator, rnode->fc_id);
3256 return OCS_HW_RTN_ERROR;
3257 }
3258
3259 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3260 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3261 rnode->indicator);
3262 return OCS_HW_RTN_ERROR;
3263 }
3264
3265 rnode->indicator = ngroup->indicator;
3266 rnode->index = ngroup->index;
3267
3268 return OCS_HW_RTN_SUCCESS;
3269 }
3270
3271 ocs_hw_rtn_e
3272 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3273 {
3274 int ref;
3275
3276 if (!hw || !ngroup) {
3277 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3278 hw, ngroup);
3279 return OCS_HW_RTN_ERROR;
3280 }
3281
3282 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3283 if (ref) {
3284 /* Hmmm, the reference count is non-zero */
3285 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3286 ref, ngroup->indicator);
3287
3288 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3289 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3290 ngroup->indicator);
3291 return OCS_HW_RTN_ERROR;
3292 }
3293
3294 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3295 }
3296
3297 ngroup->indicator = UINT32_MAX;
3298 ngroup->index = UINT32_MAX;
3299
3300 return OCS_HW_RTN_SUCCESS;
3301 }
3302
3303 /**
3304 * @brief Initialize IO fields on each free call.
3305 *
3306 * @n @b Note: This is done on each free call (as opposed to each
3307 * alloc call) because port-owned XRIs are not
3308 * allocated with ocs_hw_io_alloc() but are freed with this
3309 * function.
3310 *
3311 * @param io Pointer to HW IO.
3312 */
3313 static inline void
3314 ocs_hw_init_free_io(ocs_hw_io_t *io)
3315 {
3316 /*
3317 * Set io->done to NULL, to avoid any callbacks, should
3318 * a completion be received for one of these IOs
3319 */
3320 io->done = NULL;
3321 io->abort_done = NULL;
3322 io->status_saved = 0;
3323 io->abort_in_progress = FALSE;
3324 io->port_owned_abort_count = 0;
3325 io->rnode = NULL;
3326 io->type = 0xFFFF;
3327 io->wq = NULL;
3328 io->ul_io = NULL;
3329 io->tgt_wqe_timeout = 0;
3330 }
3331
3332 /**
3333 * @ingroup io
3334 * @brief Lockless allocate a HW IO object.
3335 *
3336 * @par Description
3337 * Assume that hw->ocs_lock is held. This function is only used if
3338 * use_dif_sec_xri workaround is being used.
3339 *
3340 * @param hw Hardware context.
3341 *
3342 * @return Returns a pointer to an object on success, or NULL on failure.
3343 */
3344 static inline ocs_hw_io_t *
3345 _ocs_hw_io_alloc(ocs_hw_t *hw)
3346 {
3347 ocs_hw_io_t *io = NULL;
3348
3349 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3350 ocs_list_add_tail(&hw->io_inuse, io);
3351 io->state = OCS_HW_IO_STATE_INUSE;
3352 io->quarantine = FALSE;
3353 io->quarantine_first_phase = TRUE;
3354 io->abort_reqtag = UINT32_MAX;
3355 ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3356 } else {
3357 ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3358 }
3359
3360 return io;
3361 }

/**
3363 * @ingroup io
3364 * @brief Allocate a HW IO object.
3365 *
3366 * @par Description
3367 * @n @b Note: This function applies to non-port owned XRIs
3368 * only.
3369 *
3370 * @param hw Hardware context.
3371 *
3372 * @return Returns a pointer to an object on success, or NULL on failure.
3373 */
3374 ocs_hw_io_t *
3375 ocs_hw_io_alloc(ocs_hw_t *hw)
3376 {
3377 ocs_hw_io_t *io = NULL;
3378
3379 ocs_lock(&hw->io_lock);
3380 io = _ocs_hw_io_alloc(hw);
3381 ocs_unlock(&hw->io_lock);
3382
3383 return io;
3384 }
3385
3386 /**
3387 * @ingroup io
3388 * @brief Allocate/Activate a port owned HW IO object.
3389 *
3390 * @par Description
3391 * This function is called by the transport layer when an XRI is
3392 * allocated by the SLI-Port. This will "activate" the HW IO
3393 * associated with the XRI received from the SLI-Port to mirror
3394 * the state of the XRI.
3395 * @n @n @b Note: This function applies to port owned XRIs only.
3396 *
3397 * @param hw Hardware context.
3398 * @param io Pointer HW IO to activate/allocate.
3399 *
3400 * @return Returns a pointer to an object on success, or NULL on failure.
3401 */
3402 ocs_hw_io_t *
3403 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3404 {
3405 if (ocs_ref_read_count(&io->ref) > 0) {
3406 ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3407 return NULL;
3408 }
3409
3410 if (io->wq != NULL) {
3411 ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3412 return NULL;
3413 }
3414
3415 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3416 io->xbusy = TRUE;
3417
3418 return io;
3419 }
3420
3421 /**
3422 * @ingroup io
 * @brief When an IO is freed, move it to the correct list depending on the
 * exchange busy flag and active workarounds.
3425 *
3426 * @par Description
3427 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3428 * from the busy or wait_free list.
3429 *
3430 * @param hw Hardware context.
3431 * @param io Pointer to the IO object to move.
3432 */
3433 static void
3434 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3435 {
3436 if (io->xbusy) {
3437 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3438 ocs_list_add_tail(&hw->io_wait_free, io);
3439 io->state = OCS_HW_IO_STATE_WAIT_FREE;
3440 } else {
3441 /* IO not busy, add to free list */
3442 ocs_list_add_tail(&hw->io_free, io);
3443 io->state = OCS_HW_IO_STATE_FREE;
3444 }
3445
3446 /* BZ 161832 workaround */
3447 if (hw->workaround.use_dif_sec_xri) {
3448 ocs_hw_check_sec_hio_list(hw);
3449 }
3450 }
3451
3452 /**
3453 * @ingroup io
3454 * @brief Free a HW IO object. Perform cleanup common to
3455 * port and host-owned IOs.
3456 *
3457 * @param hw Hardware context.
3458 * @param io Pointer to the HW IO object.
3459 */
3460 static inline void
3461 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3462 {
3463 /* initialize IO fields */
3464 ocs_hw_init_free_io(io);
3465
3466 /* Restore default SGL */
3467 ocs_hw_io_restore_sgl(hw, io);
3468 }
3469
3470 /**
3471 * @ingroup io
3472 * @brief Free a HW IO object associated with a port-owned XRI.
3473 *
3474 * @param arg Pointer to the HW IO object.
3475 */
3476 static void
3477 ocs_hw_io_free_port_owned(void *arg)
3478 {
3479 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3480 ocs_hw_t *hw = io->hw;
3481
3482 /*
3483 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3484 * waiting for buffers.
3485 */
3486 if (io->auto_xfer_rdy_dnrx) {
3487 ocs_lock(&hw->io_lock);
3488 /* take a reference count because we still own the IO until the buffer is posted */
3489 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3490 ocs_list_add_tail(&hw->io_port_dnrx, io);
3491 ocs_unlock(&hw->io_lock);
3492 }
3493
3494 /* perform common cleanup */
3495 ocs_hw_io_free_common(hw, io);
3496 }
3497
3498 /**
3499 * @ingroup io
3500 * @brief Free a previously-allocated HW IO object. Called when
3501 * IO refcount goes to zero (host-owned IOs only).
3502 *
3503 * @param arg Pointer to the HW IO object.
3504 */
3505 static void
3506 ocs_hw_io_free_internal(void *arg)
3507 {
3508 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3509 ocs_hw_t *hw = io->hw;
3510
3511 /* perform common cleanup */
3512 ocs_hw_io_free_common(hw, io);
3513
3514 ocs_lock(&hw->io_lock);
3515 /* remove from in-use list */
3516 ocs_list_remove(&hw->io_inuse, io);
3517 ocs_hw_io_free_move_correct_list(hw, io);
3518 ocs_unlock(&hw->io_lock);
3519 }
3520
3521 /**
3522 * @ingroup io
3523 * @brief Free a previously-allocated HW IO object.
3524 *
3525 * @par Description
3526 * @n @b Note: This function applies to port and host owned XRIs.
3527 *
3528 * @param hw Hardware context.
3529 * @param io Pointer to the HW IO object.
3530 *
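 * @par Example
 * Alloc/free pairing sketch for a host-owned IO; the free call drops the
 * reference taken by ocs_hw_io_alloc():
 * @code
 * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 *
 * if (io != NULL) {
 *         // build and submit a WQE for the IO, then release it
 *         ocs_hw_io_free(hw, io);
 * }
 * @endcode
 *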
3531 * @return Returns a non-zero value if HW IO was freed, 0 if references
3532 * on the IO still exist, or a negative value if an error occurred.
3533 */
3534 int32_t
3535 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3536 {
3537 /* just put refcount */
3538 if (ocs_ref_read_count(&io->ref) <= 0) {
3539 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3540 io->indicator, io->reqtag);
3541 return -1;
3542 }
3543
3544 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3545 }
3546
3547 /**
3548 * @ingroup io
3549 * @brief Check if given HW IO is in-use
3550 *
3551 * @par Description
3552 * This function returns TRUE if the given HW IO has been
3553 * allocated and is in-use, and FALSE otherwise. It applies to
3554 * port and host owned XRIs.
3555 *
3556 * @param hw Hardware context.
3557 * @param io Pointer to the HW IO object.
3558 *
3559 * @return TRUE if an IO is in use, or FALSE otherwise.
3560 */
3561 uint8_t
3562 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3563 {
3564 return (ocs_ref_read_count(&io->ref) > 0);
3565 }
3566
3567 /**
3568 * @brief Write a HW IO to a work queue.
3569 *
3570 * @par Description
3571 * A HW IO is written to a work queue.
3572 *
3573 * @param wq Pointer to work queue.
3574 * @param wqe Pointer to WQ entry.
3575 *
3576 * @n @b Note: Assumes the SLI-4 queue lock is held.
3577 *
3578 * @return Returns 0 on success, or a negative error code value on failure.
3579 */
3580 static int32_t
3581 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3582 {
3583 int32_t rc;
3584 int32_t queue_rc;
3585
	/* Every so often, set the wqec bit to generate consumed completions */
3587 if (wq->wqec_count) {
3588 wq->wqec_count--;
3589 }
3590 if (wq->wqec_count == 0) {
3591 sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3592 genwqe->wqec = 1;
3593 wq->wqec_count = wq->wqec_set_count;
3594 }
3595
3596 /* Decrement WQ free count */
3597 wq->free_count--;
3598
3599 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3600
3601 if (queue_rc < 0) {
3602 rc = -1;
3603 } else {
3604 rc = 0;
3605 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3606 }
3607
3608 return rc;
3609 }
3610
3611 /**
3612 * @brief Write a HW IO to a work queue.
3613 *
3614 * @par Description
3615 * A HW IO is written to a work queue.
3616 *
3617 * @param wq Pointer to work queue.
3618 * @param wqe Pointer to WQE entry.
3619 *
3620 * @n @b Note: Takes the SLI-4 queue lock.
3621 *
3622 * @return Returns 0 on success, or a negative error code value on failure.
3623 */
3624 int32_t
3625 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3626 {
3627 int32_t rc = 0;
3628
3629 sli_queue_lock(wq->queue);
	if (!ocs_list_empty(&wq->pending_list)) {
3631 ocs_list_add_tail(&wq->pending_list, wqe);
3632 OCS_STAT(wq->wq_pending_count++;)
3633 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3634 rc = _hw_wq_write(wq, wqe);
3635 if (rc < 0) {
3636 break;
3637 }
3638 if (wqe->abort_wqe_submit_needed) {
3639 wqe->abort_wqe_submit_needed = 0;
3640 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3641 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3642 ocs_list_add_tail(&wq->pending_list, wqe);
3643 OCS_STAT(wq->wq_pending_count++;)
3644 }
3645 }
3646 } else {
3647 if (wq->free_count > 0) {
3648 rc = _hw_wq_write(wq, wqe);
3649 } else {
3650 ocs_list_add_tail(&wq->pending_list, wqe);
3651 OCS_STAT(wq->wq_pending_count++;)
3652 }
3653 }
3654
3655 sli_queue_unlock(wq->queue);
3656
	return rc;
}
3660
3661 /**
3662 * @brief Update free count and submit any pending HW IOs
3663 *
3664 * @par Description
* The WQ free count is updated, and any pending HW IOs that now fit
* in the queue are submitted.
*
* @param wq Pointer to work queue.
* @param update_free_count Value added to the WQ's free count.
3670 *
3671 * @return None.
3672 */
3673 static void
3674 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3675 {
3676 ocs_hw_wqe_t *wqe;
3677
3678 sli_queue_lock(wq->queue);
3679
3680 /* Update free count with value passed in */
3681 wq->free_count += update_free_count;
3682
3683 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3684 _hw_wq_write(wq, wqe);
3685
3686 if (wqe->abort_wqe_submit_needed) {
3687 wqe->abort_wqe_submit_needed = 0;
3688 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3689 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3690 ocs_list_add_tail(&wq->pending_list, wqe);
3691 OCS_STAT(wq->wq_pending_count++;)
3692 }
3693 }
3694
3695 sli_queue_unlock(wq->queue);
3696 }
3697
3698 /**
* @brief Check for IOs waiting on the BZ 161832 workaround list
*
* @par Description
* Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary
* HW IO, try to allocate one and dispatch the waiting IO.
*
* @n @b Note: hw->io_lock MUST be held when called.
*
* @param hw Pointer to HW object.
*
* @return None.
3710 */
3711 static void
3712 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3713 {
3714 ocs_hw_io_t *io;
3715 ocs_hw_io_t *sec_io;
3716 int rc = 0;
3717
3718 while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3719 uint16_t flags;
3720
3721 sec_io = _ocs_hw_io_alloc(hw);
3722 if (sec_io == NULL) {
3723 break;
3724 }
3725
3726 io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3727 ocs_list_add_tail(&hw->io_inuse, io);
3728 io->state = OCS_HW_IO_STATE_INUSE;
3729 io->sec_hio = sec_io;
3730
/* mark the secondary XRI for quarantine on the second and subsequent data phases */
3732 if (io->xbusy) {
3733 sec_io->quarantine = TRUE;
3734 }
3735
3736 flags = io->sec_iparam.fcp_tgt.flags;
3737 if (io->xbusy) {
3738 flags |= SLI4_IO_CONTINUATION;
3739 } else {
3740 flags &= ~SLI4_IO_CONTINUATION;
3741 }
3742
3743 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3744
3745 /* Complete (continue) TRECV IO */
3746 if (io->xbusy) {
3747 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3748 io->first_data_sge,
3749 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3750 io->reqtag, SLI4_CQ_DEFAULT,
3751 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3752 flags,
3753 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3754 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3755 break;
3756 }
3757 } else {
3758 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3759 io->first_data_sge,
3760 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3761 io->reqtag, SLI4_CQ_DEFAULT,
3762 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3763 flags,
3764 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3765 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3766 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3767 break;
3768 }
3769 }
3770
3771 if (io->wq == NULL) {
3772 io->wq = ocs_hw_queue_next_wq(hw, io);
3773 ocs_hw_assert(io->wq != NULL);
3774 }
3775 io->xbusy = TRUE;
3776
3777 /*
3778 * Add IO to active io wqe list before submitting, in case the
3779 * wcqe processing preempts this thread.
3780 */
3781 ocs_hw_add_io_timed_wqe(hw, io);
3782 rc = hw_wq_write(io->wq, &io->wqe);
3783 if (rc >= 0) {
3784 /* non-negative return is success */
3785 rc = 0;
3786 } else {
3787 /* failed to write wqe, remove from active wqe list */
3788 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3789 io->xbusy = FALSE;
3790 ocs_hw_remove_io_timed_wqe(hw, io);
3791 }
3792 }
3793 }
3794
3795 /**
3796 * @ingroup io
3797 * @brief Send a Single Request/Response Sequence (SRRS).
3798 *
3799 * @par Description
3800 * This routine supports communication sequences consisting of a single
3801 * request and single response between two endpoints. Examples include:
3802 * - Sending an ELS request.
* - Sending an ELS response - To send an ELS response, the caller must provide
3804 * the OX_ID from the received request.
3805 * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3806 * the caller must provide the R_CTL, TYPE, and DF_CTL
3807 * values to place in the FC frame header.
3808 * .
3809 * @n @b Note: The caller is expected to provide both send and receive
3810 * buffers for requests. In the case of sending a response, no receive buffer
3811 * is necessary and the caller may pass in a NULL pointer.
3812 *
3813 * @param hw Hardware context.
3814 * @param type Type of sequence (ELS request/response, FC-CT).
3815 * @param io Previously-allocated HW IO object.
3816 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3817 * @param len Length, in bytes, of data to send.
3818 * @param receive Optional DMA memory to hold a response.
3819 * @param rnode Destination of data (that is, a remote node).
3820 * @param iparam IO parameters (ELS response and FC-CT).
3821 * @param cb Function call upon completion of sending the data (may be NULL).
3822 * @param arg Argument to pass to IO completion function.
3823 *
* @return Returns 0 on success, or a non-zero value on failure.
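*
* @par Example
* A minimal sketch of sending an ELS request; els_req and els_rsp are
* caller-owned DMA buffers, the timeout value is illustrative, and the
* completion callback may be NULL as noted above:
* @code
* ocs_hw_io_param_t iparam = {0};
*
* iparam.els.timeout = 30;     // illustrative timeout value
* if (ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, &els_req, els_req_len,
*                      &els_rsp, rnode, &iparam, NULL, NULL)) {
*         // non-zero return: the request was not queued
* }
* @endcode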
3825 */
3826 ocs_hw_rtn_e
3827 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3828 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3829 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3830 ocs_hw_srrs_cb_t cb, void *arg)
3831 {
3832 sli4_sge_t *sge = NULL;
3833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3834 uint16_t local_flags = 0;
3835
3836 if (!hw || !io || !rnode || !iparam) {
3837 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3838 hw, io, send, receive, rnode, iparam);
3839 return OCS_HW_RTN_ERROR;
3840 }
3841
3842 if (hw->state != OCS_HW_STATE_ACTIVE) {
3843 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3844 return OCS_HW_RTN_ERROR;
3845 }
3846
3847 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3848 /* We must set the XC bit for port owned XRIs */
3849 local_flags |= SLI4_IO_CONTINUATION;
3850 }
3851 io->rnode = rnode;
3852 io->type = type;
3853 io->done = cb;
3854 io->arg = arg;
3855
3856 sge = io->sgl->virt;
3857
3858 /* clear both SGE */
3859 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3860
3861 if (send) {
3862 sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3863 sge[0].buffer_address_low = ocs_addr32_lo(send->phys);
3864 sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3865 sge[0].buffer_length = len;
3866 }
3867
3868 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3869 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3870 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys);
3871 sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3872 sge[1].buffer_length = receive->size;
3873 sge[1].last = TRUE;
3874 } else {
3875 sge[0].last = TRUE;
3876 }
3877
3878 switch (type) {
3879 case OCS_HW_ELS_REQ:
3880 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3881 *((uint8_t *)(send->virt)), /* req_type */
3882 len, receive->size,
3883 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3884 ocs_log_err(hw->os, "REQ WQE error\n");
3885 rc = OCS_HW_RTN_ERROR;
3886 }
3887 break;
3888 case OCS_HW_ELS_RSP:
3889 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3890 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3891 iparam->els.ox_id,
3892 rnode, local_flags, UINT32_MAX)) {
3893 ocs_log_err(hw->os, "RSP WQE error\n");
3894 rc = OCS_HW_RTN_ERROR;
3895 }
3896 break;
3897 case OCS_HW_ELS_RSP_SID:
3898 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3899 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3900 iparam->els_sid.ox_id,
3901 rnode, local_flags, iparam->els_sid.s_id)) {
3902 ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3903 rc = OCS_HW_RTN_ERROR;
3904 }
3905 break;
3906 case OCS_HW_FC_CT:
3907 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3908 receive->size, iparam->fc_ct.timeout, io->indicator,
3909 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3910 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3911 ocs_log_err(hw->os, "GEN WQE error\n");
3912 rc = OCS_HW_RTN_ERROR;
3913 }
3914 break;
3915 case OCS_HW_FC_CT_RSP:
3916 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3917 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3918 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3919 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3920 ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3921 rc = OCS_HW_RTN_ERROR;
3922 }
3923 break;
3924 case OCS_HW_BLS_ACC:
3925 case OCS_HW_BLS_RJT:
3926 {
3927 sli_bls_payload_t bls;
3928
3929 if (OCS_HW_BLS_ACC == type) {
3930 bls.type = SLI_BLS_ACC;
3931 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3932 } else {
3933 bls.type = SLI_BLS_RJT;
3934 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3935 }
3936
3937 bls.ox_id = iparam->bls.ox_id;
3938 bls.rx_id = iparam->bls.rx_id;
3939
3940 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3941 io->indicator, io->reqtag,
3942 SLI4_CQ_DEFAULT,
3943 rnode, UINT32_MAX)) {
3944 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3945 rc = OCS_HW_RTN_ERROR;
3946 }
3947 break;
3948 }
3949 case OCS_HW_BLS_ACC_SID:
3950 {
3951 sli_bls_payload_t bls;
3952
3953 bls.type = SLI_BLS_ACC;
3954 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3955
3956 bls.ox_id = iparam->bls_sid.ox_id;
3957 bls.rx_id = iparam->bls_sid.rx_id;
3958
3959 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3960 io->indicator, io->reqtag,
3961 SLI4_CQ_DEFAULT,
3962 rnode, iparam->bls_sid.s_id)) {
3963 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3964 rc = OCS_HW_RTN_ERROR;
3965 }
3966 break;
3967 }
3968 case OCS_HW_BCAST:
3969 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3970 iparam->bcast.timeout, io->indicator, io->reqtag,
3971 SLI4_CQ_DEFAULT, rnode,
3972 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3973 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3974 rc = OCS_HW_RTN_ERROR;
3975 }
3976 break;
3977 default:
3978 ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3979 rc = OCS_HW_RTN_ERROR;
3980 }
3981
3982 if (OCS_HW_RTN_SUCCESS == rc) {
3983 if (io->wq == NULL) {
3984 io->wq = ocs_hw_queue_next_wq(hw, io);
3985 ocs_hw_assert(io->wq != NULL);
3986 }
3987 io->xbusy = TRUE;
3988
3989 /*
3990 * Add IO to active io wqe list before submitting, in case the
3991 * wcqe processing preempts this thread.
3992 */
3993 OCS_STAT(io->wq->use_count++);
3994 ocs_hw_add_io_timed_wqe(hw, io);
3995 rc = hw_wq_write(io->wq, &io->wqe);
3996 if (rc >= 0) {
3997 /* non-negative return is success */
3998 rc = 0;
3999 } else {
4000 /* failed to write wqe, remove from active wqe list */
4001 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4002 io->xbusy = FALSE;
4003 ocs_hw_remove_io_timed_wqe(hw, io);
4004 }
4005 }
4006
4007 return rc;
4008 }
4009
4010 /**
4011 * @ingroup io
4012 * @brief Send a read, write, or response IO.
4013 *
4014 * @par Description
4015 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4016 * as a target or initiator. Examples include:
4017 * - Sending read data and good response (target).
4018 * - Sending a response (target with no data or after receiving write data).
4019 * .
4020 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4021 * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4022 *
4023 * @param hw Hardware context.
4024 * @param type Type of IO (target read, target response, and so on).
4025 * @param io Previously-allocated HW IO object.
4026 * @param len Length, in bytes, of data to send.
4027 * @param iparam IO parameters.
4028 * @param rnode Destination of data (that is, a remote node).
4029 * @param cb Function call upon completion of sending data (may be NULL).
4030 * @param arg Argument to pass to IO completion function.
4031 *
4032 * @return Returns 0 on success, or a non-zero value on failure.
4033 *
4034 * @todo
* - Support specifying a relative offset.
4036 * - Use a WQ other than 0.
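*
* @par Example
* A minimal sketch of sending read data as a target (values are
* illustrative; the SGL is assumed to have been loaded already with
* ocs_hw_io_add_sge()):
* @code
* ocs_hw_io_param_t iparam = {0};
*
* iparam.fcp_tgt.ox_id = ox_id;   // OX_ID from the received FCP command
* iparam.fcp_tgt.offset = 0;      // relative offset of this data burst
* if (ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, xfer_len,
*                    &iparam, rnode, io_done_cb, io_done_arg)) {
*         // non-zero return: the WQE was not queued
* }
* @endcode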
4037 */
4038 ocs_hw_rtn_e
4039 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4040 uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4041 void *cb, void *arg)
4042 {
4043 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4044 uint32_t rpi;
4045 uint8_t send_wqe = TRUE;
4046
4047 CPUTRACE("");
4048
4049 if (!hw || !io || !rnode || !iparam) {
4050 ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4051 hw, io, iparam, rnode);
4052 return OCS_HW_RTN_ERROR;
4053 }
4054
4055 if (hw->state != OCS_HW_STATE_ACTIVE) {
4056 ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4057 return OCS_HW_RTN_ERROR;
4058 }
4059
4060 rpi = rnode->indicator;
4061
4062 if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4063 rpi = hw->workaround.unregistered_rid;
4064 ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4065 }
4066
4067 /*
4068 * Save state needed during later stages
4069 */
4070 io->rnode = rnode;
4071 io->type = type;
4072 io->done = cb;
4073 io->arg = arg;
4074
4075 /*
4076 * Format the work queue entry used to send the IO
4077 */
4078 switch (type) {
4079 case OCS_HW_IO_INITIATOR_READ:
4080 /*
* If the use_dif_quarantine workaround is in effect and DIF is in
* separate mode, mark the initiator read IO for quarantine
4083 */
4084 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4085 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4086 io->quarantine = TRUE;
4087 }
4088
4089 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4090 iparam->fcp_ini.rsp);
4091
4092 if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4093 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4094 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4095 iparam->fcp_ini.timeout)) {
4096 ocs_log_err(hw->os, "IREAD WQE error\n");
4097 rc = OCS_HW_RTN_ERROR;
4098 }
4099 break;
4100 case OCS_HW_IO_INITIATOR_WRITE:
4101 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4102 iparam->fcp_ini.rsp);
4103
4104 if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4105 len, iparam->fcp_ini.first_burst,
4106 io->indicator, io->reqtag,
4107 SLI4_CQ_DEFAULT, rpi, rnode,
4108 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4109 iparam->fcp_ini.timeout)) {
4110 ocs_log_err(hw->os, "IWRITE WQE error\n");
4111 rc = OCS_HW_RTN_ERROR;
4112 }
4113 break;
4114 case OCS_HW_IO_INITIATOR_NODATA:
4115 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4116 iparam->fcp_ini.rsp);
4117
4118 if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4119 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4120 rpi, rnode, iparam->fcp_ini.timeout)) {
4121 ocs_log_err(hw->os, "ICMND WQE error\n");
4122 rc = OCS_HW_RTN_ERROR;
4123 }
4124 break;
4125 case OCS_HW_IO_TARGET_WRITE: {
4126 uint16_t flags = iparam->fcp_tgt.flags;
4127 fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4128
4129 /*
4130 * Fill in the XFER_RDY for IF_TYPE 0 devices
4131 */
4132 *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4133 *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4134 *((uint32_t *)xfer->rsvd) = 0;
4135
4136 if (io->xbusy) {
4137 flags |= SLI4_IO_CONTINUATION;
4138 } else {
4139 flags &= ~SLI4_IO_CONTINUATION;
4140 }
4141
4142 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4143
4144 /*
* If the use_dif_quarantine workaround is in effect and this is a
* DIF-enabled IO, mark the target write IO for quarantine
4147 */
4148 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4149 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4150 io->quarantine = TRUE;
4151 }
4152
4153 /*
4154 * BZ 161832 Workaround:
* Check for the use_dif_sec_xri workaround. Note that even though the first
* data phase doesn't really need a secondary XRI, we allocate one anyway,
* as this avoids the potential for deadlock where all XRIs are allocated
* as primaries to IOs that are on hw->sec_hio_wait_list. If this secondary
* XRI is not for the first data phase, it is marked for quarantine.
4160 */
4161 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4162 /*
4163 * If we have allocated a chained SGL for skyhawk, then
4164 * we can re-use this for the sec_hio.
4165 */
4166 if (io->ovfl_io != NULL) {
4167 io->sec_hio = io->ovfl_io;
4168 io->sec_hio->quarantine = TRUE;
4169 } else {
4170 io->sec_hio = ocs_hw_io_alloc(hw);
4171 }
4172 if (io->sec_hio == NULL) {
4173 /* Failed to allocate, so save full request context and put
4174 * this IO on the wait list
4175 */
4176 io->sec_iparam = *iparam;
4177 io->sec_len = len;
4178 ocs_lock(&hw->io_lock);
4179 ocs_list_remove(&hw->io_inuse, io);
4180 ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4181 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4182 hw->sec_hio_wait_count++;
4183 ocs_unlock(&hw->io_lock);
4184 send_wqe = FALSE;
4185 /* Done */
4186 break;
4187 }
4188 /* We quarantine the secondary IO if this is the second or subsequent data phase */
4189 if (io->xbusy) {
4190 io->sec_hio->quarantine = TRUE;
4191 }
4192 }
4193
4194 /*
4195 * If not the first data phase, and io->sec_hio has been allocated, then issue
4196 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4197 */
4198 if (io->xbusy && (io->sec_hio != NULL)) {
4199 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4200 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4201 io->reqtag, SLI4_CQ_DEFAULT,
4202 iparam->fcp_tgt.ox_id, rpi, rnode,
4203 flags,
4204 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4205 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4206 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4207 rc = OCS_HW_RTN_ERROR;
4208 }
4209 } else {
4210 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4211 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4212 SLI4_CQ_DEFAULT,
4213 iparam->fcp_tgt.ox_id, rpi, rnode,
4214 flags,
4215 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4216 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4217 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4218 rc = OCS_HW_RTN_ERROR;
4219 }
4220 }
4221 break;
4222 }
4223 case OCS_HW_IO_TARGET_READ: {
4224 uint16_t flags = iparam->fcp_tgt.flags;
4225
4226 if (io->xbusy) {
4227 flags |= SLI4_IO_CONTINUATION;
4228 } else {
4229 flags &= ~SLI4_IO_CONTINUATION;
4230 }
4231
4232 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4233 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4234 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4235 SLI4_CQ_DEFAULT,
4236 iparam->fcp_tgt.ox_id, rpi, rnode,
4237 flags,
4238 iparam->fcp_tgt.dif_oper,
4239 iparam->fcp_tgt.blk_size,
4240 iparam->fcp_tgt.cs_ctl,
4241 iparam->fcp_tgt.app_id)) {
4242 ocs_log_err(hw->os, "TSEND WQE error\n");
4243 rc = OCS_HW_RTN_ERROR;
4244 } else if (hw->workaround.retain_tsend_io_length) {
4245 io->length = len;
4246 }
4247 break;
4248 }
4249 case OCS_HW_IO_TARGET_RSP: {
4250 uint16_t flags = iparam->fcp_tgt.flags;
4251
4252 if (io->xbusy) {
4253 flags |= SLI4_IO_CONTINUATION;
4254 } else {
4255 flags &= ~SLI4_IO_CONTINUATION;
4256 }
4257
4258 /* post a new auto xfer ready buffer */
4259 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4260 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4261 flags |= SLI4_IO_DNRX;
4262 }
4263 }
4264
4265 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4266 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4267 &io->def_sgl,
4268 len,
4269 io->indicator, io->reqtag,
4270 SLI4_CQ_DEFAULT,
4271 iparam->fcp_tgt.ox_id,
4272 rpi, rnode,
4273 flags, iparam->fcp_tgt.cs_ctl,
4274 io->is_port_owned,
4275 iparam->fcp_tgt.app_id)) {
4276 ocs_log_err(hw->os, "TRSP WQE error\n");
4277 rc = OCS_HW_RTN_ERROR;
4278 }
4279
4280 break;
4281 }
4282 default:
4283 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4284 rc = OCS_HW_RTN_ERROR;
4285 }
4286
4287 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4288 if (io->wq == NULL) {
4289 io->wq = ocs_hw_queue_next_wq(hw, io);
4290 ocs_hw_assert(io->wq != NULL);
4291 }
4292
4293 io->xbusy = TRUE;
4294
4295 /*
4296 * Add IO to active io wqe list before submitting, in case the
4297 * wcqe processing preempts this thread.
4298 */
4299 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4300 OCS_STAT(io->wq->use_count++);
4301 ocs_hw_add_io_timed_wqe(hw, io);
4302 rc = hw_wq_write(io->wq, &io->wqe);
4303 if (rc >= 0) {
4304 /* non-negative return is success */
4305 rc = 0;
4306 } else {
4307 /* failed to write wqe, remove from active wqe list */
4308 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4309 io->xbusy = FALSE;
4310 ocs_hw_remove_io_timed_wqe(hw, io);
4311 }
4312 }
4313
4314 return rc;
4315 }
4316
4317 /**
4318 * @brief Send a raw frame
4319 *
4320 * @par Description
* Sends a frame, consisting of a header and a payload, using a SEND_FRAME WQE.
*
* @param hw Pointer to HW object.
* @param hdr Pointer to a little-endian formatted FC header.
* @param sof Value to use as the frame SOF.
* @param eof Value to use as the frame EOF.
* @param payload Pointer to payload DMA buffer.
* @param ctx Pointer to caller provided send frame context.
* @param callback Callback function.
* @param arg Callback function argument.
*
* @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
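*
* @par Example
* A minimal sketch (hdr and payload are caller-owned; send_frame_done is
* a hypothetical callback matching the prototype in the signature; the
* SOF/EOF encodings shown are the usual SOFi3/EOFn values):
* @code
* // ctx must remain valid until the completion callback runs, so it
* // is typically embedded in a longer-lived caller structure.
* static void
* send_frame_done(void *arg, uint8_t *cqe, int32_t status)
* {
*         // release ctx and payload, record status
* }
*
* if (ocs_hw_send_frame(hw, hdr, 0x2e, 0x41, // SOFi3, EOFn
*                       &payload, ctx, send_frame_done, cb_arg)) {
*         // frame was not queued
* }
* @endcode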
4333 */
4334 ocs_hw_rtn_e
4335 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4336 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4337 {
4338 int32_t rc;
4339 ocs_hw_wqe_t *wqe;
4340 uint32_t xri;
4341 hw_wq_t *wq;
4342
4343 wqe = &ctx->wqe;
4344
4345 /* populate the callback object */
4346 ctx->hw = hw;
4347
4348 /* Fetch and populate request tag */
4349 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4350 if (ctx->wqcb == NULL) {
4351 ocs_log_err(hw->os, "can't allocate request tag\n");
4352 return OCS_HW_RTN_NO_RESOURCES;
4353 }
4354
4355 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4356 wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4357 if (wq == NULL) {
4358 wq = hw->hw_wq[0];
4359 }
4360
4361 /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
4362 xri = wq->send_frame_io->indicator;
4363
4364 /* Build the send frame WQE */
4365 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4366 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4367 if (rc) {
4368 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4369 return OCS_HW_RTN_ERROR;
4370 }
4371
4372 /* Write to WQ */
4373 rc = hw_wq_write(wq, wqe);
4374 if (rc) {
4375 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4376 return OCS_HW_RTN_ERROR;
4377 }
4378
4379 OCS_STAT(wq->use_count++);
4380
4381 return OCS_HW_RTN_SUCCESS;
4382 }
4383
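/**
* @ingroup io
* @brief Register a caller-supplied overflow SGL with an IO.
*
* @par Description
* Attaches a temporary, caller-owned SGL to the IO for use as the overflow
* SGL. This is only permitted when SGLs are not pre-registered.
*
* @param hw Hardware context.
* @param io Previously-allocated HW IO object.
* @param sgl DMA memory holding the SGL.
* @param sgl_count Number of entries in the SGL.
*
* @return Returns 0 on success, or a non-zero value on failure.
*/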
4384 ocs_hw_rtn_e
4385 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4386 {
4387 if (sli_get_sgl_preregister(&hw->sli)) {
4388 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4389 return OCS_HW_RTN_ERROR;
4390 }
4391 io->ovfl_sgl = sgl;
4392 io->ovfl_sgl_count = sgl_count;
4393 io->ovfl_io = NULL;
4394
4395 return OCS_HW_RTN_SUCCESS;
4396 }
4397
4398 static void
4399 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4400 {
4401 /* Restore the default */
4402 io->sgl = &io->def_sgl;
4403 io->sgl_count = io->def_sgl_count;
4404
4405 /*
4406 * For skyhawk, we need to free the IO allocated for the chained
4407 * SGL. For all devices, clear the overflow fields on the IO.
4408 *
4409 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4410 * the chained SGLs. If so, then we clear the ovfl_io field
4411 * when the sec_hio is freed.
4412 */
4413 if (io->ovfl_io != NULL) {
4414 ocs_hw_io_free(hw, io->ovfl_io);
4415 io->ovfl_io = NULL;
4416 }
4417
4418 /* Clear the overflow SGL */
4419 io->ovfl_sgl = NULL;
4420 io->ovfl_sgl_count = 0;
4421 io->ovfl_lsp = NULL;
4422 }
4423
4424 /**
4425 * @ingroup io
4426 * @brief Initialize the scatter gather list entries of an IO.
4427 *
4428 * @param hw Hardware context.
4429 * @param io Previously-allocated HW IO object.
4430 * @param type Type of IO (target read, target response, and so on).
4431 *
4432 * @return Returns 0 on success, or a non-zero value on failure.
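*
* @par Example
* A sketch of a typical target IO setup (buffer address and length are
* illustrative):
* @code
* // Initialize the SGL for the IO type (this writes the XFER_RDY and
* // skip entries that target writes require), then append data buffers.
* if (ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_WRITE) ||
*     ocs_hw_io_add_sge(hw, io, buf_phys, buf_len)) {
*         // handle error
* }
* @endcode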
4433 */
4434 ocs_hw_rtn_e
4435 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4436 {
4437 sli4_sge_t *data = NULL;
4438 uint32_t i = 0;
4439 uint32_t skips = 0;
4440
4441 if (!hw || !io) {
4442 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4443 hw, io);
4444 return OCS_HW_RTN_ERROR;
4445 }
4446
4447 /* Clear / reset the scatter-gather list */
4448 io->sgl = &io->def_sgl;
4449 io->sgl_count = io->def_sgl_count;
4450 io->first_data_sge = 0;
4451
4452 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4453 io->n_sge = 0;
4454 io->sge_offset = 0;
4455
4456 io->type = type;
4457
4458 data = io->sgl->virt;
4459
4460 /*
4461 * Some IO types have underlying hardware requirements on the order
4462 * of SGEs. Process all special entries here.
4463 */
4464 switch (type) {
4465 case OCS_HW_IO_INITIATOR_READ:
4466 case OCS_HW_IO_INITIATOR_WRITE:
4467 case OCS_HW_IO_INITIATOR_NODATA:
4468 /*
4469 * No skips, 2 special for initiator I/Os
4470 * The addresses and length are written later
4471 */
4472 /* setup command pointer */
4473 data->sge_type = SLI4_SGE_TYPE_DATA;
4474 data++;
4475
4476 /* setup response pointer */
4477 data->sge_type = SLI4_SGE_TYPE_DATA;
4478
4479 if (OCS_HW_IO_INITIATOR_NODATA == type) {
4480 data->last = TRUE;
4481 }
4482 data++;
4483
4484 io->n_sge = 2;
4485 break;
4486 case OCS_HW_IO_TARGET_WRITE:
4487 #define OCS_TARGET_WRITE_SKIPS 2
4488 skips = OCS_TARGET_WRITE_SKIPS;
4489
4490 /* populate host resident XFER_RDY buffer */
4491 data->sge_type = SLI4_SGE_TYPE_DATA;
4492 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4493 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys);
4494 data->buffer_length = io->xfer_rdy.size;
4495 data++;
4496
4497 skips--;
4498
4499 io->n_sge = 1;
4500 break;
4501 case OCS_HW_IO_TARGET_READ:
4502 /*
4503 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4504 */
4505 #define OCS_TARGET_READ_SKIPS 2
4506 skips = OCS_TARGET_READ_SKIPS;
4507 break;
4508 case OCS_HW_IO_TARGET_RSP:
4509 /*
4510 * No skips, etc. for FCP_TRSP64
4511 */
4512 break;
4513 default:
4514 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4515 return OCS_HW_RTN_ERROR;
4516 }
4517
4518 /*
4519 * Write skip entries
4520 */
4521 for (i = 0; i < skips; i++) {
4522 data->sge_type = SLI4_SGE_TYPE_SKIP;
4523 data++;
4524 }
4525
4526 io->n_sge += skips;
4527
4528 /*
4529 * Set last
4530 */
4531 data->last = TRUE;
4532
4533 return OCS_HW_RTN_SUCCESS;
4534 }
4535
4536 /**
4537 * @ingroup io
4538 * @brief Add a T10 PI seed scatter gather list entry.
4539 *
4540 * @param hw Hardware context.
4541 * @param io Previously-allocated HW IO object.
4542 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4543 *
4544 * @return Returns 0 on success, or a non-zero value on failure.
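*
* @par Example
* A sketch of seeding DIF checking before adding data SGEs (field values
* are illustrative; blk_size uses the driver's DIF block-size encoding):
* @code
* ocs_hw_dif_info_t dif = {0};
*
* dif.dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC;
* dif.blk_size = blk_size_code;        // device DIF block-size encoding
* dif.ref_tag_cmp = (uint32_t)lba;     // initial reference tag
* dif.check_ref_tag = TRUE;
* dif.check_guard = TRUE;
* if (ocs_hw_io_add_seed_sge(hw, io, &dif)) {
*         // handle error
* }
* @endcode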
4545 */
4546 ocs_hw_rtn_e
4547 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4548 {
4549 sli4_sge_t *data = NULL;
4550 sli4_diseed_sge_t *dif_seed;
4551
4552 /* If no dif_info, or dif_oper is disabled, then just return success */
4553 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4554 return OCS_HW_RTN_SUCCESS;
4555 }
4556
4557 if (!hw || !io) {
4558 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4559 hw, io, dif_info);
4560 return OCS_HW_RTN_ERROR;
4561 }
4562
4563 data = io->sgl->virt;
4564 data += io->n_sge;
4565
/* If we are doing T10 DIF, add the DIF Seed SGE */
4567 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4568 dif_seed = (sli4_diseed_sge_t *)data;
4569 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4570 dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4571 dif_seed->app_tag_repl = dif_info->app_tag_repl;
4572 dif_seed->repl_app_tag = dif_info->repl_app_tag;
4573 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4574 dif_seed->atrt = dif_info->disable_app_ref_ffff;
4575 dif_seed->at = dif_info->disable_app_ffff;
4576 }
4577 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4578 /* Workaround for SKH (BZ157233) */
4579 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4580 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4581 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4582 }
4583
4584 dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4585 dif_seed->dif_blk_size = dif_info->blk_size;
4586 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4587 dif_seed->check_app_tag = dif_info->check_app_tag;
4588 dif_seed->check_ref_tag = dif_info->check_ref_tag;
4589 dif_seed->check_crc = dif_info->check_guard;
4590 dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4591
4592 switch(dif_info->dif_oper) {
4593 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4594 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4595 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4596 break;
4597 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4598 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4599 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4600 break;
4601 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4602 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4603 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4604 break;
4605 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4606 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4607 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4608 break;
4609 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4610 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4611 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4612 break;
4613 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4614 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4615 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4616 break;
4617 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4618 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4619 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4620 break;
4621 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4622 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4623 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4624 break;
4625 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4626 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4627 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4628 break;
4629 default:
4630 ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4631 dif_info->dif_oper);
4632 return OCS_HW_RTN_ERROR;
4633 }
4634
4635 /*
4636 * Set last, clear previous last
4637 */
4638 data->last = TRUE;
4639 if (io->n_sge) {
4640 data[-1].last = FALSE;
4641 }
4642
4643 io->n_sge++;
4644
4645 return OCS_HW_RTN_SUCCESS;
4646 }
4647
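/**
* @brief Switch an IO over to its overflow SGL.
*
* @par Description
* Called when the current SGL is full. On Skyhawk, a second HW IO may be
* allocated so that its pre-registered SGL can be chained to with a chain
* SGE; otherwise, a previously-registered overflow SGL is linked in with
* an LSP SGE.
*
* @param hw Hardware context.
* @param io Previously-allocated HW IO object.
*
* @return Returns 0 on success, or a non-zero value on failure.
*/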
4648 static ocs_hw_rtn_e
4649 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4650 {
4651 sli4_lsp_sge_t *lsp;
4652
4653 /* fail if we're already pointing to the overflow SGL */
4654 if (io->sgl == io->ovfl_sgl) {
4655 return OCS_HW_RTN_ERROR;
4656 }
4657
4658 /*
4659 * For skyhawk, we can use another SGL to extend the SGL list. The
4660 * Chained entry must not be in the first 4 entries.
4661 *
4662 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4663 */
4664 if (sli_get_sgl_preregister(&hw->sli) &&
4665 io->def_sgl_count > 4 &&
4666 io->ovfl_io == NULL &&
4667 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4668 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4669 io->ovfl_io = ocs_hw_io_alloc(hw);
4670 if (io->ovfl_io != NULL) {
4671 /*
4672 * Note: We can't call ocs_hw_io_register_sgl() here
4673 * because it checks that SGLs are not pre-registered
* and for skyhawk, preregistered SGLs are required.
4675 */
4676 io->ovfl_sgl = &io->ovfl_io->def_sgl;
4677 io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4678 }
4679 }
4680
4681 /* fail if we don't have an overflow SGL registered */
4682 if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4683 return OCS_HW_RTN_ERROR;
4684 }
4685
4686 /*
* Overflow: we need to put a link SGE in the last location of the current SGL,
* after copying the last SGE to the overflow SGL
4689 */
4690
4691 ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4692
4693 lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4694 ocs_memset(lsp, 0, sizeof(*lsp));
4695
4696 if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4697 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4698 sli_skh_chain_sge_build(&hw->sli,
4699 (sli4_sge_t*)lsp,
4700 io->ovfl_io->indicator,
4701 0, /* frag_num */
4702 0); /* offset */
4703 } else {
4704 lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4705 lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
4706 lsp->sge_type = SLI4_SGE_TYPE_LSP;
4707 lsp->last = 0;
4708 io->ovfl_lsp = lsp;
4709 io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4710 }
4711
4712 /* Update the current SGL pointer, and n_sgl */
4713 io->sgl = io->ovfl_sgl;
4714 io->sgl_count = io->ovfl_sgl_count;
4715 io->n_sge = 1;
4716
4717 return OCS_HW_RTN_SUCCESS;
4718 }
4719
4720 /**
4721 * @ingroup io
4722 * @brief Add a scatter gather list entry to an IO.
4723 *
4724 * @param hw Hardware context.
4725 * @param io Previously-allocated HW IO object.
4726 * @param addr Physical address.
4727 * @param length Length of memory pointed to by @c addr.
4728 *
4729 * @return Returns 0 on success, or a non-zero value on failure.
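*
* @par Example
* A sketch of loading a physical segment list; the segs array with
* ds_addr/ds_len members is a hypothetical bus-dma style segment list:
* @code
* uint32_t i;
*
* for (i = 0; i < nsegs; i++) {
*         if (ocs_hw_io_add_sge(hw, io, segs[i].ds_addr, segs[i].ds_len)) {
*                 // SGL full and no overflow SGL available
*                 break;
*         }
* }
* @endcode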
4730 */
4731 ocs_hw_rtn_e
4732 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4733 {
4734 sli4_sge_t *data = NULL;
4735
4736 if (!hw || !io || !addr || !length) {
4737 ocs_log_err(hw ? hw->os : NULL,
4738 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4739 hw, io, addr, length);
4740 return OCS_HW_RTN_ERROR;
4741 }
4742
4743 if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4744 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4745 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4746 return OCS_HW_RTN_ERROR;
4747 }
4748 }
4749
4750 if (length > sli_get_max_sge(&hw->sli)) {
4751 ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4752 length, sli_get_max_sge(&hw->sli));
4753 return OCS_HW_RTN_ERROR;
4754 }
4755
4756 data = io->sgl->virt;
4757 data += io->n_sge;
4758
4759 data->sge_type = SLI4_SGE_TYPE_DATA;
4760 data->buffer_address_high = ocs_addr32_hi(addr);
4761 data->buffer_address_low = ocs_addr32_lo(addr);
4762 data->buffer_length = length;
4763 data->data_offset = io->sge_offset;
4764 /*
4765 * Always assume this is the last entry and mark as such.
4766 * If this is not the first entry unset the "last SGE"
4767 * indication for the previous entry
4768 */
4769 data->last = TRUE;
4770 if (io->n_sge) {
4771 data[-1].last = FALSE;
4772 }
4773
4774 /* Set first_data_bde if not previously set */
4775 if (io->first_data_sge == 0) {
4776 io->first_data_sge = io->n_sge;
4777 }
4778
4779 io->sge_offset += length;
4780 io->n_sge++;
4781
4782 /* Update the linked segment length (only executed after overflow has begun) */
4783 if (io->ovfl_lsp != NULL) {
4784 io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4785 }
4786
4787 return OCS_HW_RTN_SUCCESS;
4788 }
4789
4790 /**
4791 * @ingroup io
4792 * @brief Add a T10 DIF scatter gather list entry to an IO.
4793 *
4794 * @param hw Hardware context.
4795 * @param io Previously-allocated HW IO object.
4796 * @param addr DIF physical address.
4797 *
4798 * @return Returns 0 on success, or a non-zero value on failure.
4799 */
4800 ocs_hw_rtn_e
4801 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4802 {
4803 sli4_dif_sge_t *data = NULL;
4804
4805 if (!hw || !io || !addr) {
4806 ocs_log_err(hw ? hw->os : NULL,
4807 "bad parameter hw=%p io=%p addr=%lx\n",
4808 hw, io, addr);
4809 return OCS_HW_RTN_ERROR;
4810 }
4811
4812 if ((io->n_sge + 1) > hw->config.n_sgl) {
if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4814 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4815 return OCS_HW_RTN_ERROR;
4816 }
4817 }
4818
4819 data = io->sgl->virt;
4820 data += io->n_sge;
4821
4822 data->sge_type = SLI4_SGE_TYPE_DIF;
4823 /* Workaround for SKH (BZ157233) */
4824 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4825 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4826 data->sge_type = SLI4_SGE_TYPE_SKIP;
4827 }
4828
4829 data->buffer_address_high = ocs_addr32_hi(addr);
4830 data->buffer_address_low = ocs_addr32_lo(addr);
4831
4832 /*
4833 * Always assume this is the last entry and mark as such.
4834 * If this is not the first entry unset the "last SGE"
4835 * indication for the previous entry
4836 */
4837 data->last = TRUE;
4838 if (io->n_sge) {
4839 data[-1].last = FALSE;
4840 }
4841
4842 io->n_sge++;
4843
4844 return OCS_HW_RTN_SUCCESS;
4845 }
4846
4847 /**
4848 * @ingroup io
4849 * @brief Abort a previously-started IO.
4850 *
4851 * @param hw Hardware context.
4852 * @param io_to_abort The IO to abort.
4853 * @param send_abts Boolean to have the hardware automatically
4854 * generate an ABTS.
4855 * @param cb Function call upon completion of the abort (may be NULL).
4856 * @param arg Argument to pass to abort completion function.
4857 *
4858 * @return Returns 0 on success, or a non-zero value on failure.
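*
* @par Example
* A sketch of requesting an ABTS-based abort (abort_done is a
* hypothetical completion callback):
* @code
* switch (ocs_hw_io_abort(hw, io, TRUE, abort_done, io)) {
* case OCS_HW_RTN_SUCCESS:
* case OCS_HW_RTN_IO_ABORT_IN_PROGRESS:
*         break;          // abort queued, or one is already outstanding
* case OCS_HW_RTN_IO_NOT_ACTIVE:
*         break;          // the IO already completed
* default:
*         break;          // handle error
* }
* @endcode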
4859 */
4860 ocs_hw_rtn_e
4861 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4862 {
4863 sli4_abort_type_e atype = SLI_ABORT_MAX;
4864 uint32_t id = 0, mask = 0;
4865 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4866 hw_wq_callback_t *wqcb;
4867
4868 if (!hw || !io_to_abort) {
4869 ocs_log_err(hw ? hw->os : NULL,
4870 "bad parameter hw=%p io=%p\n",
4871 hw, io_to_abort);
4872 return OCS_HW_RTN_ERROR;
4873 }
4874
4875 if (hw->state != OCS_HW_STATE_ACTIVE) {
4876 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4877 hw->state);
4878 return OCS_HW_RTN_ERROR;
4879 }
4880
4881 /* take a reference on IO being aborted */
4882 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4883 /* command no longer active */
4884 ocs_log_test(hw ? hw->os : NULL,
4885 "io not active xri=0x%x tag=0x%x\n",
4886 io_to_abort->indicator, io_to_abort->reqtag);
4887 return OCS_HW_RTN_IO_NOT_ACTIVE;
4888 }
4889
4890 /* non-port owned XRI checks */
4891 /* Must have a valid WQ reference */
4892 if (io_to_abort->wq == NULL) {
4893 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4894 io_to_abort->indicator);
4895 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4896 return OCS_HW_RTN_IO_NOT_ACTIVE;
4897 }
4898
4899 /* Validation checks complete; now check to see if already being aborted */
4900 ocs_lock(&hw->io_abort_lock);
4901 if (io_to_abort->abort_in_progress) {
4902 ocs_unlock(&hw->io_abort_lock);
4903 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4904 ocs_log_debug(hw ? hw->os : NULL,
4905 "io already being aborted xri=0x%x tag=0x%x\n",
4906 io_to_abort->indicator, io_to_abort->reqtag);
4907 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4908 }
4909
4910 /*
4911 * This IO is not already being aborted. Set flag so we won't try to
4912 * abort it again. After all, we only have one abort_done callback.
4913 */
4914 io_to_abort->abort_in_progress = 1;
4915 ocs_unlock(&hw->io_abort_lock);
4916
4917 /*
4918 * If we got here, the possibilities are:
4919 * - host owned xri
4920 * - io_to_abort->wq_index != UINT32_MAX
4921 * - submit ABORT_WQE to same WQ
4922 * - port owned xri:
4923 * - rxri: io_to_abort->wq_index == UINT32_MAX
4924 * - submit ABORT_WQE to any WQ
4925 * - non-rxri
4926 * - io_to_abort->index != UINT32_MAX
4927 * - submit ABORT_WQE to same WQ
4928 * - io_to_abort->index == UINT32_MAX
4929 * - submit ABORT_WQE to any WQ
4930 */
4931 io_to_abort->abort_done = cb;
4932 io_to_abort->abort_arg = arg;
4933
4934 atype = SLI_ABORT_XRI;
4935 id = io_to_abort->indicator;
4936
4937 /* Allocate a request tag for the abort portion of this IO */
4938 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4939 if (wqcb == NULL) {
4940 ocs_log_err(hw->os, "can't allocate request tag\n");
4941 return OCS_HW_RTN_NO_RESOURCES;
4942 }
4943 io_to_abort->abort_reqtag = wqcb->instance_index;
4944
4945 /*
4946 * If the wqe is on the pending list, then set this wqe to be
4947 * aborted when the IO's wqe is removed from the list.
4948 */
4949 if (io_to_abort->wq != NULL) {
4950 sli_queue_lock(io_to_abort->wq->queue);
4951 if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4952 io_to_abort->wqe.abort_wqe_submit_needed = 1;
4953 io_to_abort->wqe.send_abts = send_abts;
4954 io_to_abort->wqe.id = id;
4955 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4956 sli_queue_unlock(io_to_abort->wq->queue);
return OCS_HW_RTN_SUCCESS;
4958 }
4959 sli_queue_unlock(io_to_abort->wq->queue);
4960 }
4961
4962 if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4963 io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4964 ocs_log_err(hw->os, "ABORT WQE error\n");
4965 io_to_abort->abort_reqtag = UINT32_MAX;
4966 ocs_hw_reqtag_free(hw, wqcb);
4967 rc = OCS_HW_RTN_ERROR;
4968 }
4969
4970 if (OCS_HW_RTN_SUCCESS == rc) {
4971 if (io_to_abort->wq == NULL) {
4972 io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4973 ocs_hw_assert(io_to_abort->wq != NULL);
4974 }
4975 /* ABORT_WQE does not actually utilize an XRI on the Port,
4976 * therefore, keep xbusy as-is to track the exchange's state,
4977 * not the ABORT_WQE's state
4978 */
4979 rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
if (rc >= 0) {
4981 /* non-negative return is success */
4982 rc = 0;
4983 /* can't abort an abort so skip adding to timed wqe list */
4984 }
4985 }
4986
4987 if (OCS_HW_RTN_SUCCESS != rc) {
4988 ocs_lock(&hw->io_abort_lock);
4989 io_to_abort->abort_in_progress = 0;
4990 ocs_unlock(&hw->io_abort_lock);
4991 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4992 }
4993 return rc;
4994 }
4995
4996 /**
4997 * @ingroup io
4998 * @brief Return the OX_ID/RX_ID of the IO.
4999 *
5000 * @param hw Hardware context.
5001 * @param io HW IO object.
5002 *
5003 * @return Returns X_ID on success, or -1 on failure.
5004 */
5005 int32_t
5006 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5007 {
5008 if (!hw || !io) {
5009 ocs_log_err(hw ? hw->os : NULL,
5010 "bad parameter hw=%p io=%p\n", hw, io);
5011 return -1;
5012 }
5013
5014 return io->indicator;
5015 }
5016
5017 typedef struct ocs_hw_fw_write_cb_arg {
5018 ocs_hw_fw_cb_t cb;
5019 void *arg;
5020 } ocs_hw_fw_write_cb_arg_t;
5021
5022 typedef struct ocs_hw_sfp_cb_arg {
5023 ocs_hw_sfp_cb_t cb;
5024 void *arg;
5025 ocs_dma_t payload;
5026 } ocs_hw_sfp_cb_arg_t;
5027
5028 typedef struct ocs_hw_temp_cb_arg {
5029 ocs_hw_temp_cb_t cb;
5030 void *arg;
5031 } ocs_hw_temp_cb_arg_t;
5032
5033 typedef struct ocs_hw_link_stat_cb_arg {
5034 ocs_hw_link_stat_cb_t cb;
5035 void *arg;
5036 } ocs_hw_link_stat_cb_arg_t;
5037
5038 typedef struct ocs_hw_host_stat_cb_arg {
5039 ocs_hw_host_stat_cb_t cb;
5040 void *arg;
5041 } ocs_hw_host_stat_cb_arg_t;
5042
5043 typedef struct ocs_hw_dump_get_cb_arg {
5044 ocs_hw_dump_get_cb_t cb;
5045 void *arg;
5046 void *mbox_cmd;
5047 } ocs_hw_dump_get_cb_arg_t;
5048
5049 typedef struct ocs_hw_dump_clear_cb_arg {
5050 ocs_hw_dump_clear_cb_t cb;
5051 void *arg;
5052 void *mbox_cmd;
5053 } ocs_hw_dump_clear_cb_arg_t;
5054
5055 /**
5056 * @brief Write a portion of a firmware image to the device.
5057 *
5058 * @par Description
5059 * Calls the correct firmware write function based on the device type.
5060 *
5061 * @param hw Hardware context.
5062 * @param dma DMA structure containing the firmware image chunk.
5063 * @param size Size of the firmware image chunk.
5064 * @param offset Offset, in bytes, from the beginning of the firmware image.
5065 * @param last True if this is the last chunk of the image.
5066 * Causes the image to be committed to flash.
5067 * @param cb Pointer to a callback function that is called when the command completes.
* The callback function prototype is
* <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5070 * @param arg Pointer to be passed to the callback function.
5071 *
5072 * @return Returns 0 on success, or a non-zero value on failure.
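*
* @par Example
* A sketch of writing one image chunk (fw_write_done is a hypothetical
* callback matching the prototype above; chunk_dma, offset, and
* image_len are caller state):
* @code
* static void
* fw_write_done(int32_t status, uint32_t bytes_written,
*               uint32_t change_status, void *arg)
* {
*         // record the result and wake the waiting caller
* }
*
* if (ocs_hw_firmware_write(hw, &chunk_dma, chunk_len, offset,
*                           (offset + chunk_len) >= image_len,
*                           fw_write_done, &wr_state)) {
*         // the write was not started
* }
* @endcode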
5073 */
5074 ocs_hw_rtn_e
5075 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5076 {
5077 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5078 return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5079 } else {
/* firmware_write is not supported for BE3/Skyhawk */
return OCS_HW_RTN_ERROR;
5082 }
5083 }
5084
5085 /**
5086 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5087 *
5088 * @par Description
5089 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5090 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5091 * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5092 * and to signal the caller that the write has completed.
5093 *
5094 * @param hw Hardware context.
5095 * @param dma DMA structure containing the firmware image chunk.
5096 * @param size Size of the firmware image chunk.
5097 * @param offset Offset, in bytes, from the beginning of the firmware image.
5098 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5099 * @param cb Pointer to a callback function that is called when the command completes.
* The callback function prototype is
* <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5102 * @param arg Pointer to be passed to the callback function.
5103 *
5104 * @return Returns 0 on success, or a non-zero value on failure.
5105 */
5106 ocs_hw_rtn_e
5107 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5108 {
5109 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5110 uint8_t *mbxdata;
5111 ocs_hw_fw_write_cb_arg_t *cb_arg;
5112 int noc=0; /* No Commit bit - set to 1 for testing */
5113
5114 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5115 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5116 return OCS_HW_RTN_ERROR;
5117 }
5118
5119 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5120 if (mbxdata == NULL) {
5121 ocs_log_err(hw->os, "failed to malloc mbox\n");
5122 return OCS_HW_RTN_NO_MEMORY;
5123 }
5124
5125 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5126 if (cb_arg == NULL) {
5127 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5128 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5129 return OCS_HW_RTN_NO_MEMORY;
5130 }
5131
5132 cb_arg->cb = cb;
5133 cb_arg->arg = arg;
5134
5135 if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5136 size, offset, "/prg/", dma)) {
5137 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5138 }
5139
5140 if (rc != OCS_HW_RTN_SUCCESS) {
5141 ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5142 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5143 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5144 }
5145
5146 return rc;
5147
5148 }
5149
5150 /**
5151 * @brief Called when the WRITE OBJECT command completes.
5152 *
5153 * @par Description
* Get the number of bytes actually written and the change status out of
* the response, free the mailbox that was malloc'd by
* ocs_hw_firmware_write(), then call the callback and pass the status,
* bytes written, and change status.
5157 *
5158 * @param hw Hardware context.
5159 * @param status Status field from the mbox completion.
5160 * @param mqe Mailbox response structure.
5161 * @param arg Pointer to a callback function that signals the caller that the command is done.
* The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5163 *
5164 * @return Returns 0.
5165 */
5166 static int32_t
5167 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5168 {
5169
5170 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5171 sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5172 ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5173 uint32_t bytes_written;
5174 uint16_t mbox_status;
5175 uint32_t change_status;
5176
5177 bytes_written = wr_obj_rsp->actual_write_length;
5178 mbox_status = mbox_rsp->hdr.status;
5179 change_status = wr_obj_rsp->change_status;
5180
5181 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5182
5183 if (cb_arg) {
5184 if (cb_arg->cb) {
5185 if ((status == 0) && mbox_status) {
5186 status = mbox_status;
5187 }
5188 cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5189 }
5190
5191 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5192 }
5193
5194 return 0;
5195
5196 }
5197
5198 /**
5199 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5200 *
5201 * @par Description
* Get the number of bytes read out of the response, free the mailbox that was malloc'd
* by ocs_hw_get_sfp(), then call the callback and pass the status, bytes read, and page data.
5204 *
5205 * @param hw Hardware context.
5206 * @param status Status field from the mbox completion.
5207 * @param mqe Mailbox response structure.
5208 * @param arg Pointer to a callback function that signals the caller that the command is done.
5209 * The callback function prototype is
5210 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5211 *
5212 * @return Returns 0.
5213 */
5214 static int32_t
5215 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5216 {
5217
5218 ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5219 ocs_dma_t *payload = NULL;
5220 sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5221 uint32_t bytes_written;
5222
5223 if (cb_arg) {
5224 payload = &(cb_arg->payload);
5225 if (cb_arg->cb) {
5226 mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5227 bytes_written = mbox_rsp->hdr.response_length;
5228 if ((status == 0) && mbox_rsp->hdr.status) {
5229 status = mbox_rsp->hdr.status;
5230 }
5231 cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5232 }
5233
5234 ocs_dma_free(hw->os, &cb_arg->payload);
5235 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5236 }
5237
5238 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5239 return 0;
5240 }
5241
5242 /**
5243 * @ingroup io
5244 * @brief Function to retrieve the SFP information.
5245 *
5246 * @param hw Hardware context.
5247 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5248 * @param cb Function call upon completion of sending the data (may be NULL).
5249 * @param arg Argument to pass to IO completion function.
5250 *
5251 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
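*
* @par Example
* A sketch of reading SFP page 0xa0 (sfp_done is a hypothetical callback
* of type ocs_hw_sfp_cb_t):
* @code
* if (ocs_hw_get_sfp(hw, 0xa0, sfp_done, NULL) != OCS_HW_RTN_SUCCESS) {
*         // the mailbox command was not issued
* }
* @endcode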
5252 */
5253 ocs_hw_rtn_e
5254 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5255 {
5256 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5257 ocs_hw_sfp_cb_arg_t *cb_arg;
5258 uint8_t *mbxdata;
5259
5260 /* mbxdata holds the header of the command */
5261 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5262 if (mbxdata == NULL) {
5263 ocs_log_err(hw->os, "failed to malloc mbox\n");
5264 return OCS_HW_RTN_NO_MEMORY;
5265 }
5266
5267 /* cb_arg holds the data that will be passed to the callback on completion */
5268 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5269 if (cb_arg == NULL) {
5270 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5271 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5272 return OCS_HW_RTN_NO_MEMORY;
5273 }
5274
5275 cb_arg->cb = cb;
5276 cb_arg->arg = arg;
5277
5278 /* payload holds the non-embedded portion */
5279 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5280 OCS_MIN_DMA_ALIGNMENT)) {
5281 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5282 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5283 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5284 return OCS_HW_RTN_NO_MEMORY;
5285 }
5286
5287 /* Send the HW command */
5288 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5289 &cb_arg->payload)) {
5290 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5291 }
5292
5293 if (rc != OCS_HW_RTN_SUCCESS) {
5294 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5295 rc);
5296 ocs_dma_free(hw->os, &cb_arg->payload);
5297 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5298 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5299 }
5300
5301 return rc;
5302 }
5303
5304 /**
5305 * @brief Function to retrieve the temperature information.
5306 *
5307 * @param hw Hardware context.
5308 * @param cb Function call upon completion of sending the data (may be NULL).
5309 * @param arg Argument to pass to IO completion function.
5310 *
5311 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
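*
* @par Example
* A sketch with a hypothetical callback; the argument order matches the
* invocation in ocs_hw_cb_temp() below:
* @code
* static void
* temp_done(int32_t status, uint32_t curr_temp,
*           uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld,
*           uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld,
*           uint32_t fan_on_thrshld, void *arg)
* {
*         // consume the readings
* }
*
* ocs_hw_get_temperature(hw, temp_done, NULL);
* @endcode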
5312 */
5313 ocs_hw_rtn_e
5314 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5315 {
5316 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5317 ocs_hw_temp_cb_arg_t *cb_arg;
5318 uint8_t *mbxdata;
5319
5320 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5321 if (mbxdata == NULL) {
5322 ocs_log_err(hw->os, "failed to malloc mbox");
5323 return OCS_HW_RTN_NO_MEMORY;
5324 }
5325
5326 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5327 if (cb_arg == NULL) {
5328 ocs_log_err(hw->os, "failed to malloc cb_arg");
5329 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5330 return OCS_HW_RTN_NO_MEMORY;
5331 }
5332
5333 cb_arg->cb = cb;
5334 cb_arg->arg = arg;
5335
5336 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5337 SLI4_WKI_TAG_SAT_TEM)) {
5338 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5339 }
5340
5341 if (rc != OCS_HW_RTN_SUCCESS) {
5342 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5343 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5344 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5345 }
5346
5347 return rc;
5348 }
5349
5350 /**
5351 * @brief Called when the DUMP command completes.
5352 *
5353 * @par Description
5354 * Get the temperature data out of the response, free the mailbox that was malloc'd
5355 * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5356 *
5357 * @param hw Hardware context.
5358 * @param status Status field from the mbox completion.
5359 * @param mqe Mailbox response structure.
5360 * @param arg Pointer to a callback function that signals the caller that the command is done.
5361 * The callback function prototype is defined by ocs_hw_temp_cb_t.
5362 *
5363 * @return Returns 0.
5364 */
5365 static int32_t
5366 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5367 {
5368
5369 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5370 ocs_hw_temp_cb_arg_t *cb_arg = arg;
5371 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6 */
5373 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5374 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5375 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */
5376 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */
5377
5378 if (cb_arg) {
5379 if (cb_arg->cb) {
5380 if ((status == 0) && mbox_rsp->hdr.status) {
5381 status = mbox_rsp->hdr.status;
5382 }
5383 cb_arg->cb(status,
5384 curr_temp,
5385 crit_temp_thrshld,
5386 warn_temp_thrshld,
5387 norm_temp_thrshld,
5388 fan_off_thrshld,
5389 fan_on_thrshld,
5390 cb_arg->arg);
5391 }
5392
5393 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5394 }
5395 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5396
5397 return 0;
5398 }
5399
5400 /**
5401 * @brief Function to retrieve the link statistics.
5402 *
5403 * @param hw Hardware context.
5404 * @param req_ext_counters If TRUE, then the extended counters will be requested.
5405 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5406 * @param clear_all_counters If TRUE, the counters will be cleared.
* @param cb Function to call upon completion of sending the data (may be NULL).
5408 * @param arg Argument to pass to IO completion function.
5409 *
5410 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5411 */
5412 ocs_hw_rtn_e
5413 ocs_hw_get_link_stats(ocs_hw_t *hw,
5414 uint8_t req_ext_counters,
5415 uint8_t clear_overflow_flags,
5416 uint8_t clear_all_counters,
5417 ocs_hw_link_stat_cb_t cb,
5418 void *arg)
5419 {
5420 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5421 ocs_hw_link_stat_cb_arg_t *cb_arg;
5422 uint8_t *mbxdata;
5423
5424 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5425 if (mbxdata == NULL) {
5426 ocs_log_err(hw->os, "failed to malloc mbox");
5427 return OCS_HW_RTN_NO_MEMORY;
5428 }
5429
5430 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5431 if (cb_arg == NULL) {
5432 ocs_log_err(hw->os, "failed to malloc cb_arg");
5433 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5434 return OCS_HW_RTN_NO_MEMORY;
5435 }
5436
5437 cb_arg->cb = cb;
5438 cb_arg->arg = arg;
5439
5440 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5441 req_ext_counters,
5442 clear_overflow_flags,
5443 clear_all_counters)) {
5444 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5445 }
5446
5447 if (rc != OCS_HW_RTN_SUCCESS) {
5448 ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5449 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5450 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5451 }
5452
5453 return rc;
5454 }
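
/*
 * Usage sketch (illustrative, not part of the driver): requesting the
 * extended link statistics without clearing any counters or overflow
 * flags. The callback name is hypothetical; its parameters mirror the
 * ocs_hw_link_stat_cb_t invocation in ocs_hw_cb_link_stat() below.
 *
 * @code
 * static void
 * example_link_stat_cb(int32_t status, uint32_t num_counters,
 *                      ocs_hw_link_stat_counts_t *counters, void *arg)
 * {
 *         if (status == 0) {
 *                 // e.g. read counters[OCS_HW_LINK_STAT_CRC_COUNT].counter,
 *                 // checking its .overflow flag alongside it
 *         }
 * }
 *
 * ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE, example_link_stat_cb, NULL);
 * @endcode
 */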
5455
5456 /**
5457 * @brief Called when the READ_LINK_STAT command completes.
5458 *
5459 * @par Description
5460 * Get the counters out of the response, free the mailbox that was malloc'd
5461 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5462 *
5463 * @param hw Hardware context.
5464 * @param status Status field from the mbox completion.
5465 * @param mqe Mailbox response structure.
5466 * @param arg Pointer to a callback function that signals the caller that the command is done.
5467 * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5468 *
5469 * @return Returns 0.
5470 */
5471 static int32_t
5472 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5473 {
5474
5475 sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5476 ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5477 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5478 uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5479
5480 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5481 OCS_HW_LINK_STAT_MAX);
5482
5483 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5484 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5485 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5486 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5487 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5488 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5489 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5490 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5491 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5492 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5493 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5494 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5495 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5496 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5497 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5498 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5499 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5500 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5501 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5502 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5503
5504 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5505 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5506 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5507 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5508 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5509 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5510 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5511 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5512 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5513 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5514 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5515 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5516 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5517 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5518 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5519 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5520 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5521 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5522 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5523 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5524
5525 if (cb_arg) {
5526 if (cb_arg->cb) {
5527 if ((status == 0) && mbox_rsp->hdr.status) {
5528 status = mbox_rsp->hdr.status;
5529 }
5530 cb_arg->cb(status,
5531 num_counters,
5532 counts,
5533 cb_arg->arg);
5534 }
5535
5536 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5537 }
5538 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5539
5540 return 0;
5541 }
5542
5543 /**
5544 * @brief Function to retrieve the link and host statistics.
5545 *
5546 * @param hw Hardware context.
* @param cc Clear counters. If TRUE, all counters will be cleared.
* @param cb Function to call upon completion of receiving the data.
* @param arg Argument to pass to the completion callback (typically a pointer to the FC host statistics structure).
5550 *
5551 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5552 */
5553 ocs_hw_rtn_e
5554 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5555 {
5556 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5557 ocs_hw_host_stat_cb_arg_t *cb_arg;
5558 uint8_t *mbxdata;
5559
mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5561 if (mbxdata == NULL) {
5562 ocs_log_err(hw->os, "failed to malloc mbox");
5563 return OCS_HW_RTN_NO_MEMORY;
5564 }
5565
cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), OCS_M_NOWAIT);
5567 if (cb_arg == NULL) {
5568 ocs_log_err(hw->os, "failed to malloc cb_arg");
5569 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5570 return OCS_HW_RTN_NO_MEMORY;
5571 }
5572
5573 cb_arg->cb = cb;
5574 cb_arg->arg = arg;
5575
5576 /* Send the HW command to get the host stats */
5577 if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5578 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5579 }
5580
5581 if (rc != OCS_HW_RTN_SUCCESS) {
5582 ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5583 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5584 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5585 }
5586
5587 return rc;
5588 }
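
/*
 * Usage sketch (illustrative, not part of the driver): reading the host
 * statistics without clearing them. The callback name is hypothetical; its
 * parameters mirror the ocs_hw_host_stat_cb_t invocation in
 * ocs_hw_cb_host_stat() below.
 *
 * @code
 * static void
 * example_host_stat_cb(int32_t status, uint32_t num_counters,
 *                      ocs_hw_host_stat_counts_t *counters, void *arg)
 * {
 *         if (status == 0) {
 *                 // e.g. read counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter
 *         }
 * }
 *
 * ocs_hw_get_host_stats(hw, FALSE, example_host_stat_cb, NULL);
 * @endcode
 */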
5589
5590 /**
5591 * @brief Called when the READ_STATUS command completes.
5592 *
5593 * @par Description
5594 * Get the counters out of the response, free the mailbox that was malloc'd
5595 * by ocs_hw_get_host_stats(), then call the callback and pass
5596 * the status and data.
5597 *
5598 * @param hw Hardware context.
5599 * @param status Status field from the mbox completion.
5600 * @param mqe Mailbox response structure.
5601 * @param arg Pointer to a callback function that signals the caller that the command is done.
5602 * The callback function prototype is defined by
5603 * ocs_hw_host_stat_cb_t.
5604 *
5605 * @return Returns 0.
5606 */
5607 static int32_t
5608 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5609 {
5610
5611 sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5612 ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5613 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5614 uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5615
5616 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5617 OCS_HW_HOST_STAT_MAX);
5618
5619 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5620 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5621 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5622 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5623 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5624 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5625 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5626 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5627 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5628 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5629 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5630 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5631 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5632 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5633
5634 if (cb_arg) {
5635 if (cb_arg->cb) {
5636 if ((status == 0) && mbox_rsp->hdr.status) {
5637 status = mbox_rsp->hdr.status;
5638 }
5639 cb_arg->cb(status,
5640 num_counters,
5641 counts,
5642 cb_arg->arg);
5643 }
5644
5645 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5646 }
5647 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5648
5649 return 0;
5650 }
5651
5652 /**
5653 * @brief HW link configuration enum to the CLP string value mapping.
5654 *
5655 * This structure provides a mapping from the ocs_hw_linkcfg_e
5656 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5657 * control) to the CLP string that is used
5658 * in the DMTF_CLP_CMD mailbox command.
5659 */
5660 typedef struct ocs_hw_linkcfg_map_s {
5661 ocs_hw_linkcfg_e linkcfg;
5662 const char *clp_str;
5663 } ocs_hw_linkcfg_map_t;
5664
5665 /**
5666 * @brief Mapping from the HW linkcfg enum to the CLP command value
5667 * string.
5668 */
5669 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5670 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5671 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5672 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5673 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5674 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5675 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5676 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5677
5678 /**
5679 * @brief HW link configuration enum to Skyhawk link config ID mapping.
5680 *
5681 * This structure provides a mapping from the ocs_hw_linkcfg_e
5682 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5683 * control) to the link config ID numbers used by Skyhawk
5684 */
5685 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5686 ocs_hw_linkcfg_e linkcfg;
5687 uint32_t config_id;
5688 } ocs_hw_skyhawk_linkcfg_map_t;
5689
5690 /**
5691 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5692 */
5693 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5694 {OCS_HW_LINKCFG_4X10G, 0x0a},
5695 {OCS_HW_LINKCFG_1X40G, 0x09},
5696 };
5697
5698 /**
5699 * @brief Helper function for getting the HW linkcfg enum from the CLP
5700 * string value
5701 *
5702 * @param clp_str CLP string value from OEMELX_LinkConfig.
5703 *
5704 * @return Returns the HW linkcfg enum corresponding to clp_str.
5705 */
5706 static ocs_hw_linkcfg_e
5707 ocs_hw_linkcfg_from_clp(const char *clp_str)
5708 {
5709 uint32_t i;
5710 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5711 if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5712 return linkcfg_map[i].linkcfg;
5713 }
5714 }
5715 return OCS_HW_LINKCFG_NA;
5716 }
5717
5718 /**
5719 * @brief Helper function for getting the CLP string value from the HW
5720 * linkcfg enum.
5721 *
5722 * @param linkcfg HW linkcfg enum.
5723 *
5724 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5725 * given linkcfg.
5726 */
5727 static const char *
5728 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5729 {
5730 uint32_t i;
5731 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5732 if (linkcfg_map[i].linkcfg == linkcfg) {
5733 return linkcfg_map[i].clp_str;
5734 }
5735 }
5736 return NULL;
5737 }
5738
5739 /**
5740 * @brief Helper function for getting a Skyhawk link config ID from the HW
5741 * linkcfg enum.
5742 *
5743 * @param linkcfg HW linkcfg enum.
5744 *
5745 * @return Returns the Skyhawk link config ID corresponding to
5746 * given linkcfg.
5747 */
5748 static uint32_t
5749 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5750 {
5751 uint32_t i;
5752 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5753 if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5754 return skyhawk_linkcfg_map[i].config_id;
5755 }
5756 }
5757 return 0;
5758 }
5759
5760 /**
5761 * @brief Helper function for getting the HW linkcfg enum from a
5762 * Skyhawk config ID.
5763 *
5764 * @param config_id Skyhawk link config ID.
5765 *
5766 * @return Returns the HW linkcfg enum corresponding to config_id.
5767 */
5768 static ocs_hw_linkcfg_e
5769 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5770 {
5771 uint32_t i;
5772 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5773 if (skyhawk_linkcfg_map[i].config_id == config_id) {
5774 return skyhawk_linkcfg_map[i].linkcfg;
5775 }
5776 }
5777 return OCS_HW_LINKCFG_NA;
5778 }
5779
5780 /**
5781 * @brief Link configuration callback argument.
5782 */
5783 typedef struct ocs_hw_linkcfg_cb_arg_s {
5784 ocs_hw_port_control_cb_t cb;
5785 void *arg;
5786 uint32_t opts;
5787 int32_t status;
5788 ocs_dma_t dma_cmd;
5789 ocs_dma_t dma_resp;
5790 uint32_t result_len;
5791 } ocs_hw_linkcfg_cb_arg_t;
5792
5793 /**
5794 * @brief Set link configuration.
5795 *
5796 * @param hw Hardware context.
5797 * @param value Link configuration enum to which the link configuration is
5798 * set.
5799 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5800 * @param cb Callback function to invoke following mbx command.
5801 * @param arg Callback argument.
5802 *
5803 * @return Returns OCS_HW_RTN_SUCCESS on success.
5804 */
5805 static ocs_hw_rtn_e
5806 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5807 {
5808 if (!sli_link_is_configurable(&hw->sli)) {
5809 ocs_log_debug(hw->os, "Function not supported\n");
5810 return OCS_HW_RTN_ERROR;
5811 }
5812
5813 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5814 return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5815 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5816 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5817 return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5818 } else {
5819 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5820 return OCS_HW_RTN_ERROR;
5821 }
5822 }
5823
5824 /**
5825 * @brief Set link configuration for Lancer
5826 *
5827 * @param hw Hardware context.
5828 * @param value Link configuration enum to which the link configuration is
5829 * set.
5830 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5831 * @param cb Callback function to invoke following mbx command.
5832 * @param arg Callback argument.
5833 *
5834 * @return Returns OCS_HW_RTN_SUCCESS on success.
5835 */
5836 static ocs_hw_rtn_e
5837 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5838 {
5839 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5840 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5841 const char *value_str = NULL;
5842 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5843
5844 /* translate ocs_hw_linkcfg_e to CLP string */
value_str = ocs_hw_clp_from_linkcfg(value);
if (value_str == NULL) {
ocs_log_test(hw->os, "Link config %d not supported\n", value);
return OCS_HW_RTN_ERROR;
}
5846
5847 /* allocate memory for callback argument */
5848 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5849 if (cb_arg == NULL) {
5850 ocs_log_err(hw->os, "failed to malloc cb_arg");
5851 return OCS_HW_RTN_NO_MEMORY;
5852 }
5853
5854 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5855 /* allocate DMA for command */
5856 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5857 ocs_log_err(hw->os, "malloc failed\n");
5858 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5859 return OCS_HW_RTN_NO_MEMORY;
5860 }
5861 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5862 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5863
5864 /* allocate DMA for response */
5865 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5866 ocs_log_err(hw->os, "malloc failed\n");
5867 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5868 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5869 return OCS_HW_RTN_NO_MEMORY;
5870 }
5871 cb_arg->cb = cb;
5872 cb_arg->arg = arg;
5873 cb_arg->opts = opts;
5874
5875 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5876 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5877
5878 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5879 /* if failed, or polling, free memory here; if success and not
5880 * polling, will free in callback function
5881 */
5882 if (rc) {
5883 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5884 (char *)cb_arg->dma_cmd.virt);
5885 }
5886 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5887 ocs_dma_free(hw->os, &cb_arg->dma_resp);
5888 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5889 }
5890 return rc;
5891 }
5892
5893 /**
5894 * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5895 *
5896 * @param hw Hardware context.
* @param status Status from the SET_RECONFIG_LINK_ID command.
5898 * @param mqe Mailbox response structure.
5899 * @param arg Pointer to a callback argument.
5900 *
* @return None.
5902 */
5903 static void
5904 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5905 {
5906 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5907
5908 if (status) {
5909 ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5910 }
5911
5912 /* invoke callback */
5913 if (cb_arg->cb) {
5914 cb_arg->cb(status, 0, cb_arg->arg);
5915 }
5916
5917 /* if polling, will free memory in calling function */
5918 if (cb_arg->opts != OCS_CMD_POLL) {
5919 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5920 }
5921 }
5922
5923 /**
5924 * @brief Set link configuration for a Skyhawk
5925 *
5926 * @param hw Hardware context.
5927 * @param value Link configuration enum to which the link configuration is
5928 * set.
5929 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5930 * @param cb Callback function to invoke following mbx command.
5931 * @param arg Callback argument.
5932 *
5933 * @return Returns OCS_HW_RTN_SUCCESS on success.
5934 */
5935 static ocs_hw_rtn_e
5936 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5937 {
5938 uint8_t *mbxdata;
5939 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5940 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5941 uint32_t config_id;
5942
5943 config_id = ocs_hw_config_id_from_linkcfg(value);
5944
5945 if (config_id == 0) {
5946 ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5947 return OCS_HW_RTN_ERROR;
5948 }
5949
5950 /* mbxdata holds the header of the command */
5951 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5952 if (mbxdata == NULL) {
5953 ocs_log_err(hw->os, "failed to malloc mbox\n");
5954 return OCS_HW_RTN_NO_MEMORY;
5955 }
5956
5957 /* cb_arg holds the data that will be passed to the callback on completion */
5958 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5959 if (cb_arg == NULL) {
5960 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5961 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5962 return OCS_HW_RTN_NO_MEMORY;
5963 }
5964
5965 cb_arg->cb = cb;
5966 cb_arg->arg = arg;
5967
5968 if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5969 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5970 }
5971
5972 if (rc != OCS_HW_RTN_SUCCESS) {
5973 ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5974 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5975 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5976 } else if (opts == OCS_CMD_POLL) {
5977 /* if we're polling we have to call the callback here. */
5978 ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5979 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5980 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5981 } else {
/* We weren't polling, so the callback has already been invoked */
5983 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5984 }
5985
5986 return rc;
5987 }
5988
5989 /**
5990 * @brief Get link configuration.
5991 *
5992 * @param hw Hardware context.
5993 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5994 * @param cb Callback function to invoke following mbx command.
5995 * @param arg Callback argument.
5996 *
5997 * @return Returns OCS_HW_RTN_SUCCESS on success.
5998 */
5999 static ocs_hw_rtn_e
6000 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6001 {
6002 if (!sli_link_is_configurable(&hw->sli)) {
6003 ocs_log_debug(hw->os, "Function not supported\n");
6004 return OCS_HW_RTN_ERROR;
6005 }
6006
6007 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
(SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
6009 return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6010 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6011 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6012 return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6013 } else {
6014 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6015 return OCS_HW_RTN_ERROR;
6016 }
6017 }
6018
6019 /**
6020 * @brief Get link configuration for a Lancer
6021 *
6022 * @param hw Hardware context.
6023 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6024 * @param cb Callback function to invoke following mbx command.
6025 * @param arg Callback argument.
6026 *
6027 * @return Returns OCS_HW_RTN_SUCCESS on success.
6028 */
6029 static ocs_hw_rtn_e
6030 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6031 {
6032 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6033 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6034 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6035
6036 /* allocate memory for callback argument */
6037 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6038 if (cb_arg == NULL) {
6039 ocs_log_err(hw->os, "failed to malloc cb_arg");
6040 return OCS_HW_RTN_NO_MEMORY;
6041 }
6042
6043 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6044
6045 /* allocate DMA for command */
6046 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6047 ocs_log_err(hw->os, "malloc failed\n");
6048 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6049 return OCS_HW_RTN_NO_MEMORY;
6050 }
6051
6052 /* copy CLP command to DMA command */
6053 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6054 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6055
6056 /* allocate DMA for response */
6057 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6058 ocs_log_err(hw->os, "malloc failed\n");
6059 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6060 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6061 return OCS_HW_RTN_NO_MEMORY;
6062 }
6063 cb_arg->cb = cb;
6064 cb_arg->arg = arg;
6065 cb_arg->opts = opts;
6066
6067 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6068 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6069
6070 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6071 /* if failed or polling, free memory here; if not polling and success,
6072 * will free in callback function
6073 */
6074 if (rc) {
6075 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6076 (char *)cb_arg->dma_cmd.virt);
6077 }
6078 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6079 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6080 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6081 }
6082 return rc;
6083 }
6084
6085 /**
6086 * @brief Get the link configuration callback.
6087 *
6088 * @param hw Hardware context.
* @param status Status from the GET_RECONFIG_LINK_INFO command.
6090 * @param mqe Mailbox response structure.
6091 * @param arg Pointer to a callback argument.
6092 *
* @return None.
6094 */
6095 static void
6096 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6097 {
6098 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6099 sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6100 ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6101
6102 if (status) {
6103 ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6104 } else {
6105 /* Call was successful */
6106 value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6107 }
6108
6109 /* invoke callback */
6110 if (cb_arg->cb) {
6111 cb_arg->cb(status, value, cb_arg->arg);
6112 }
6113
6114 /* if polling, will free memory in calling function */
6115 if (cb_arg->opts != OCS_CMD_POLL) {
6116 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6117 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6118 }
6119 }
6120
6121 /**
6122 * @brief Get link configuration for a Skyhawk.
6123 *
6124 * @param hw Hardware context.
6125 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6126 * @param cb Callback function to invoke following mbx command.
6127 * @param arg Callback argument.
6128 *
6129 * @return Returns OCS_HW_RTN_SUCCESS on success.
6130 */
6131 static ocs_hw_rtn_e
6132 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6133 {
6134 uint8_t *mbxdata;
6135 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6136 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6137
6138 /* mbxdata holds the header of the command */
6139 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6140 if (mbxdata == NULL) {
6141 ocs_log_err(hw->os, "failed to malloc mbox\n");
6142 return OCS_HW_RTN_NO_MEMORY;
6143 }
6144
6145 /* cb_arg holds the data that will be passed to the callback on completion */
6146 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6147 if (cb_arg == NULL) {
6148 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6149 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6150 return OCS_HW_RTN_NO_MEMORY;
6151 }
6152
6153 cb_arg->cb = cb;
6154 cb_arg->arg = arg;
6155 cb_arg->opts = opts;
6156
6157 /* dma_mem holds the non-embedded portion */
6158 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6159 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6160 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6161 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6162 return OCS_HW_RTN_NO_MEMORY;
6163 }
6164
6165 if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6166 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6167 }
6168
6169 if (rc != OCS_HW_RTN_SUCCESS) {
6170 ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6171 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6172 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6173 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6174 } else if (opts == OCS_CMD_POLL) {
6175 /* if we're polling we have to call the callback here. */
6176 ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6177 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6178 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6179 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6180 } else {
/* We weren't polling, so the callback has already been invoked */
6182 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6183 }
6184
6185 return rc;
6186 }
6187
6188 /**
6189 * @brief Sets the DIF seed value.
6190 *
6191 * @param hw Hardware context.
6192 *
6193 * @return Returns OCS_HW_RTN_SUCCESS on success.
6194 */
6195 static ocs_hw_rtn_e
6196 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6197 {
6198 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6199 uint8_t buf[SLI4_BMBX_SIZE];
6200 sli4_req_common_set_features_dif_seed_t seed_param;
6201
6202 ocs_memset(&seed_param, 0, sizeof(seed_param));
6203 seed_param.seed = hw->config.dif_seed;
6204
6205 /* send set_features command */
6206 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6207 SLI4_SET_FEATURES_DIF_SEED,
6208 4,
6209 (uint32_t*)&seed_param)) {
6210 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6211 if (rc) {
6212 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6213 } else {
6214 ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6215 hw->config.dif_seed);
6216 }
6217 } else {
6218 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6219 rc = OCS_HW_RTN_ERROR;
6220 }
6221 return rc;
6222 }
6223
6224 /**
6225 * @brief Sets the DIF mode value.
6226 *
6227 * @param hw Hardware context.
6228 *
6229 * @return Returns OCS_HW_RTN_SUCCESS on success.
6230 */
6231 static ocs_hw_rtn_e
6232 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6233 {
6234 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6235 uint8_t buf[SLI4_BMBX_SIZE];
6236 sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6237
6238 ocs_memset(&mode_param, 0, sizeof(mode_param));
6239 mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6240
6241 /* send set_features command */
6242 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6243 SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6244 sizeof(mode_param),
6245 (uint32_t*)&mode_param)) {
6246 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6247 if (rc) {
6248 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6249 } else {
6250 ocs_log_test(hw->os, "DIF mode set to %s\n",
6251 (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6252 }
6253 } else {
6254 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6255 rc = OCS_HW_RTN_ERROR;
6256 }
6257 return rc;
6258 }
6259
6260 static void
6261 ocs_hw_watchdog_timer_cb(void *arg)
6262 {
6263 ocs_hw_t *hw = (ocs_hw_t *)arg;
6264
6265 ocs_hw_config_watchdog_timer(hw);
6266 return;
6267 }
6268
6269 static void
6270 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6271 {
6272 uint16_t timeout = hw->watchdog_timeout;
6273
6274 if (status != 0) {
6275 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6276 } else {
if (timeout != 0) {
/* re-arm the timer 500ms before the timeout to keep the heartbeat alive */
ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500));
} else {
6281 ocs_del_timer(&hw->watchdog_timer);
6282 }
6283 }
6284
6285 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6286 return;
6287 }
6288
6289 /**
6290 * @brief Set configuration parameters for watchdog timer feature.
6291 *
* @param hw Hardware context. The timeout, in seconds, is taken from
* hw->watchdog_timeout; a value of zero disables the timer.
6294 *
6295 * @return Returns OCS_HW_RTN_SUCCESS on success.
6296 */
6297 static ocs_hw_rtn_e
6298 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6299 {
6300 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6301 uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6302
6303 if (!buf) {
6304 ocs_log_err(hw->os, "no buffer for command\n");
6305 return OCS_HW_RTN_NO_MEMORY;
6306 }
6307
6308 sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6309 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6310 if (rc) {
6311 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6312 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6313 }
6314 return rc;
6315 }
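
/*
 * Usage sketch (illustrative, not part of the driver): arming the firmware
 * watchdog. The 30-second value is an assumption; once armed,
 * ocs_hw_cb_cfg_watchdog() re-arms the timer 500ms before expiry so the
 * heartbeat is refreshed in time.
 *
 * @code
 * hw->watchdog_timeout = 30;      // assumed timeout, in seconds
 * if (ocs_hw_config_watchdog_timer(hw) != OCS_HW_RTN_SUCCESS) {
 *         // the watchdog could not be configured
 * }
 *
 * hw->watchdog_timeout = 0;       // a zero timeout disables the watchdog
 * (void)ocs_hw_config_watchdog_timer(hw);
 * @endcode
 */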
6316
6317 /**
6318 * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6319 *
6320 * @param hw Hardware context.
6321 * @param buf Pointer to a mailbox buffer area.
6322 *
6323 * @return Returns OCS_HW_RTN_SUCCESS on success.
6324 */
6325 static ocs_hw_rtn_e
6326 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6327 {
6328 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6329 sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6330
ocs_memset(&param, 0, sizeof(param));
6332 param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6333 param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6334 param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6335 param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6336 param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6337
6338 switch (hw->config.auto_xfer_rdy_p_type) {
6339 case 1:
6340 param.p_type = 0;
6341 break;
6342 case 3:
6343 param.p_type = 2;
6344 break;
6345 default:
6346 ocs_log_err(hw->os, "unsupported p_type %d\n",
6347 hw->config.auto_xfer_rdy_p_type);
6348 return OCS_HW_RTN_ERROR;
6349 }
6350
6351 /* build the set_features command */
6352 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6353 SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6354 sizeof(param),
&param);
6356
6357 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6358 if (rc) {
6359 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6360 } else {
6361 ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6362 param.rtc, param.atv, param.p_type,
6363 param.app_tag, param.blk_size);
6364 }
6365
6366 return rc;
6367 }
6368
6369 /**
* @brief Enable or disable the SLI port health check feature.
*
* @param hw Hardware context.
* @param query If 1, query the current state of the health check feature.
* @param enable If 1, enable the health check; if 0, disable it.
6377 *
6378 * @return Returns OCS_HW_RTN_SUCCESS on success.
6379 */
6380 static ocs_hw_rtn_e
6381 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6382 {
6383 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6384 uint8_t buf[SLI4_BMBX_SIZE];
6385 sli4_req_common_set_features_health_check_t param;
6386
ocs_memset(&param, 0, sizeof(param));
6388 param.hck = enable;
6389 param.qry = query;
6390
6391 /* build the set_features command */
6392 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6393 SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6394 sizeof(param),
&param);
6396
6397 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6398 if (rc) {
6399 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6400 } else {
6401 ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
6402 }
6403
6404 return rc;
6405 }
6406
6407 /**
* @brief Set the FDT transfer hint feature.
6409 *
6410 * @param hw Hardware context.
* @param fdt_xfer_hint Size, in bytes, at which read requests are segmented.
6412 *
6413 * @return Returns OCS_HW_RTN_SUCCESS on success.
6414 */
6415 static ocs_hw_rtn_e
6416 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6417 {
6418 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6419 uint8_t buf[SLI4_BMBX_SIZE];
6420 sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6421
ocs_memset(&param, 0, sizeof(param));
6423 param.fdt_xfer_hint = fdt_xfer_hint;
6424 /* build the set_features command */
6425 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6426 SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6427 sizeof(param),
&param);
6429
6430 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6431 if (rc) {
6432 ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6433 } else {
6434 ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6435 }
6436
6437 return rc;
6438 }
6439
6440 /**
* @brief Callback for the link configuration DMTF CLP command.
6442 *
6443 * @param hw Hardware context.
6444 * @param status Status from the DMTF CLP command.
6445 * @param result_len Length, in bytes, of the DMTF CLP result.
6446 * @param arg Pointer to a callback argument.
6447 *
* @return None.
6449 */
6450 static void
6451 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6452 {
6453 int32_t rval;
6454 char retdata_str[64];
6455 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6456 ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6457
6458 if (status) {
6459 ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6460 } else {
6461 /* parse CLP response to get return data */
6462 rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6463 sizeof(retdata_str),
6464 cb_arg->dma_resp.virt,
6465 result_len);
6466
6467 if (rval <= 0) {
6468 ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6469 } else {
6470 /* translate string into hw enum */
6471 linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6472 }
6473 }
6474
6475 /* invoke callback */
6476 if (cb_arg->cb) {
6477 cb_arg->cb(status, linkcfg, cb_arg->arg);
6478 }
6479
6480 /* if polling, will free memory in calling function */
6481 if (cb_arg->opts != OCS_CMD_POLL) {
6482 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6483 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6484 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6485 }
6486 }
6487
6488 /**
6489 * @brief Set the Lancer dump location
6490 * @par Description
6491 * This function tells a Lancer chip to use a specific DMA
6492 * buffer as a dump location rather than the internal flash.
6493 *
6494 * @param hw Hardware context.
6495 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6496 * @param dump_buffers DMA buffers to hold the dump.
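* @param fdb If non-zero, set the dump location for the function-specific dump
* buffer; the physical function 0 check in this routine applies only when fdb is zero.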
6497 *
6498 * @return Returns OCS_HW_RTN_SUCCESS on success.
6499 */
6500 ocs_hw_rtn_e
6501 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6502 {
6503 uint8_t bus, dev, func;
6504 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6505 uint8_t buf[SLI4_BMBX_SIZE];
6506
6507 /*
6508 * Make sure the FW is new enough to support this command. If the FW
6509 * is too old, the FW will UE.
6510 */
6511 if (hw->workaround.disable_dump_loc) {
6512 ocs_log_test(hw->os, "FW version is too old for this feature\n");
6513 return OCS_HW_RTN_ERROR;
6514 }
6515
6516 /* This command is only valid for physical port 0 */
6517 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6518 if (fdb == 0 && func != 0) {
6519 ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6520 func);
6521 return OCS_HW_RTN_ERROR;
6522 }
6523
6524 /*
* If a single buffer is used, then it may be passed as is to the chip. For multiple buffers,
* we must allocate an SGL list and then pass the address of the list to the chip.
6527 */
6528 if (num_buffers > 1) {
6529 uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6530 sli4_sge_t *sge;
6531 uint32_t i;
6532
6533 if (hw->dump_sges.size < sge_size) {
6534 ocs_dma_free(hw->os, &hw->dump_sges);
6535 if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6536 ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6537 return OCS_HW_RTN_NO_MEMORY;
6538 }
6539 }
6540 /* build the SGE list */
6541 ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6542 hw->dump_sges.len = sge_size;
6543 sge = hw->dump_sges.virt;
6544 for (i = 0; i < num_buffers; i++) {
6545 sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6546 sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6547 sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6548 sge[i].buffer_length = dump_buffers[i].size;
6549 }
6550 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6551 SLI4_BMBX_SIZE, FALSE, TRUE,
6552 &hw->dump_sges, fdb);
6553 } else {
6554 dump_buffers->len = dump_buffers->size;
6555 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6556 SLI4_BMBX_SIZE, FALSE, FALSE,
6557 dump_buffers, fdb);
6558 }
6559
6560 if (rc) {
6561 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6562 NULL, NULL);
6563 if (rc) {
6564 ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6565 rc);
6566 }
6567 } else {
6568 ocs_log_err(hw->os,
6569 "sli_cmd_common_set_dump_location failed\n");
6570 rc = OCS_HW_RTN_ERROR;
6571 }
6572
6573 return rc;
6574 }
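
/*
 * Usage sketch (illustrative, not part of the driver): registering two host
 * DMA buffers as the dump location. The 1MB buffer size is an assumption;
 * with num_buffers > 1 this routine builds an SGL in hw->dump_sges and hands
 * the list, rather than a single buffer, to the chip.
 *
 * @code
 * ocs_dma_t dump_bufs[2];
 * uint32_t i;
 *
 * for (i = 0; i < 2; i++) {
 *         if (ocs_dma_alloc(hw->os, &dump_bufs[i], 1024 * 1024, OCS_MIN_DMA_ALIGNMENT)) {
 *                 // allocation failed; free any buffers already allocated
 *         }
 * }
 *
 * if (ocs_hw_set_dump_location(hw, 2, dump_bufs, 0) != OCS_HW_RTN_SUCCESS) {
 *         // registration failed; free the buffers
 * }
 * @endcode
 */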
6575
6576 /**
6577 * @brief Set the Ethernet license.
6578 *
6579 * @par Description
6580 * This function sends the appropriate mailbox command (DMTF
6581 * CLP) to set the Ethernet license to the given license value.
6582 * Since it is used during the time of ocs_hw_init(), the mailbox
6583 * command is sent via polling (the BMBX route).
6584 *
6585 * @param hw Hardware context.
6586 * @param license 32-bit license value.
6587 *
6588 * @return Returns OCS_HW_RTN_SUCCESS on success.
6589 */
6590 static ocs_hw_rtn_e
6591 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6592 {
6593 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6594 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6595 ocs_dma_t dma_cmd;
6596 ocs_dma_t dma_resp;
6597
6598 /* only for lancer right now */
6599 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6600 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6601 return OCS_HW_RTN_ERROR;
6602 }
6603
6604 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6605 /* allocate DMA for command */
6606 if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6607 ocs_log_err(hw->os, "malloc failed\n");
6608 return OCS_HW_RTN_NO_MEMORY;
6609 }
6610 ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6611 ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6612
6613 /* allocate DMA for response */
6614 if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6615 ocs_log_err(hw->os, "malloc failed\n");
6616 ocs_dma_free(hw->os, &dma_cmd);
6617 return OCS_HW_RTN_NO_MEMORY;
6618 }
6619
6620 /* send DMTF CLP command mbx and poll */
6621 if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6622 ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6623 rc = OCS_HW_RTN_ERROR;
6624 }
6625
6626 ocs_dma_free(hw->os, &dma_cmd);
6627 ocs_dma_free(hw->os, &dma_resp);
6628 return rc;
6629 }
6630
6631 /**
6632 * @brief Callback argument structure for the DMTF CLP commands.
6633 */
6634 typedef struct ocs_hw_clp_cb_arg_s {
6635 ocs_hw_dmtf_clp_cb_t cb;
6636 ocs_dma_t *dma_resp;
6637 int32_t status;
6638 uint32_t opts;
6639 void *arg;
6640 } ocs_hw_clp_cb_arg_t;
6641
6642 /**
6643 * @brief Execute the DMTF CLP command.
6644 *
6645 * @param hw Hardware context.
6646 * @param dma_cmd DMA buffer containing the CLP command.
6647 * @param dma_resp DMA buffer that will contain the response (if successful).
6648 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6649 * @param cb Callback function.
6650 * @param arg Callback argument.
6651 *
* @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero error
* code on failure.
6654 */
6655 static ocs_hw_rtn_e
6656 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6657 {
6658 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6659 ocs_hw_clp_cb_arg_t *cb_arg;
6660 uint8_t *mbxdata;
6661
6662 /* allocate DMA for mailbox */
6663 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6664 if (mbxdata == NULL) {
6665 ocs_log_err(hw->os, "failed to malloc mbox\n");
6666 return OCS_HW_RTN_NO_MEMORY;
6667 }
6668
6669 /* allocate memory for callback argument */
6670 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6671 if (cb_arg == NULL) {
6672 ocs_log_err(hw->os, "failed to malloc cb_arg");
6673 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6674 return OCS_HW_RTN_NO_MEMORY;
6675 }
6676
6677 cb_arg->cb = cb;
6678 cb_arg->arg = arg;
6679 cb_arg->dma_resp = dma_resp;
6680 cb_arg->opts = opts;
6681
6682 /* Send the HW command */
6683 if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6684 dma_cmd, dma_resp)) {
6685 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6686
6687 if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6688 /* if we're polling, copy response and invoke callback to
6689 * parse result */
6690 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6691 ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6692
6693 /* set rc to resulting or "parsed" status */
6694 rc = cb_arg->status;
6695 }
6696
6697 /* if failed, or polling, free memory here */
6698 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6699 if (rc != OCS_HW_RTN_SUCCESS) {
6700 ocs_log_test(hw->os, "ocs_hw_command failed\n");
6701 }
6702 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6703 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6704 }
6705 } else {
6706 ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6707 rc = OCS_HW_RTN_ERROR;
6708 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6709 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6710 }
6711
6712 return rc;
6713 }
6714
6715 /**
6716 * @brief Called when the DMTF CLP command completes.
6717 *
6718 * @param hw Hardware context.
6719 * @param status Status field from the mbox completion.
6720 * @param mqe Mailbox response structure.
6721 * @param arg Pointer to a callback argument.
6722 *
6723 * @return None.
6724 *
6725 */
6726 static void
6727 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6728 {
6729 int32_t cb_status = 0;
6730 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6731 sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6732 ocs_hw_clp_cb_arg_t *cb_arg = arg;
6733 uint32_t result_len = 0;
6734 int32_t stat_len;
6735 char stat_str[8];
6736
6737 /* there are several status codes here, check them all and condense
6738 * into a single callback status
6739 */
6740 if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6741 ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
6742 status,
6743 mbox_rsp->hdr.status,
6744 clp_rsp->hdr.status,
6745 clp_rsp->hdr.additional_status,
6746 clp_rsp->clp_status,
6747 clp_rsp->clp_detailed_status);
6748 if (status) {
6749 cb_status = status;
6750 } else if (mbox_rsp->hdr.status) {
6751 cb_status = mbox_rsp->hdr.status;
6752 } else {
6753 cb_status = clp_rsp->clp_status;
6754 }
6755 } else {
6756 result_len = clp_rsp->resp_length;
6757 }
6758
6759 if (cb_status) {
6760 goto ocs_hw_cb_dmtf_clp_done;
6761 }
6762
6763 if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6764 ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6765 cb_arg->dma_resp->size, result_len);
6766 cb_status = -1;
6767 goto ocs_hw_cb_dmtf_clp_done;
6768 }
6769
6770 /* parse CLP response to get status */
6771 stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6772 sizeof(stat_str),
6773 cb_arg->dma_resp->virt,
6774 result_len);
6775
6776 if (stat_len <= 0) {
6777 ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6778 cb_status = -1;
6779 goto ocs_hw_cb_dmtf_clp_done;
6780 }
6781
6782 if (ocs_strcmp(stat_str, "") != 0) {
6783 ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6784 cb_status = -1;
6785 goto ocs_hw_cb_dmtf_clp_done;
6786 }
6787
6788 ocs_hw_cb_dmtf_clp_done:
6789
6790 /* save status in cb_arg for callers with NULL cb's + polling */
6791 cb_arg->status = cb_status;
6792 if (cb_arg->cb) {
6793 cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6794 }
6795 /* if polling, caller will free memory */
6796 if (cb_arg->opts != OCS_CMD_POLL) {
6797 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6798 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6799 }
6800 }
6801
6802 /**
6803 * @brief Parse the CLP result and get the value corresponding to the given
6804 * keyword.
6805 *
6806 * @param hw Hardware context.
6807 * @param keyword CLP keyword for which the value is returned.
6808 * @param value Location to which the resulting value is copied.
6809 * @param value_len Length of the value parameter.
6810 * @param resp Pointer to the response buffer that is searched
6811 * for the keyword and value.
6812 * @param resp_len Length of response buffer passed in.
6813 *
6814 * @return Returns the number of bytes written to the value
* buffer on success, or a negative value on failure.
6816 */
6817 static int32_t
6818 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6819 {
6820 char *start = NULL;
6821 char *end = NULL;
6822
6823 /* look for specified keyword in string */
6824 start = ocs_strstr(resp, keyword);
6825 if (start == NULL) {
6826 ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6827 keyword);
6828 return -1;
6829 }
6830
6831 /* now look for '=' and go one past */
6832 start = ocs_strchr(start, '=');
6833 if (start == NULL) {
6834 ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6835 keyword);
6836 return -1;
6837 }
6838 start++;
6839
6840 /* \r\n terminates value */
6841 end = ocs_strstr(start, "\r\n");
6842 if (end == NULL) {
6843 ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6844 keyword);
6845 return -1;
6846 }
6847
6848 /* make sure given result array is big enough */
6849 if ((end - start + 1) > value_len) {
6850 ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6851 value_len, (end-start));
6852 return -1;
6853 }
6854
6855 ocs_strncpy(value, start, (end - start));
6856 value[end-start] = '\0';
6857 return (end-start+1);
6858 }
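
/*
 * Parsing sketch (illustrative): a CLP response is a sequence of
 * "keyword=value\r\n" pairs, and per ocs_hw_dmtf_clp_cb() a successful
 * response carries an empty "status" value. Given a response buffer
 * containing "status=\r\nretdata=ELX_4x10G\r\n" (resp_virt and resp_len
 * are assumed to describe that buffer):
 *
 * @code
 * char retdata[64];
 * int32_t len;
 *
 * len = ocs_hw_clp_resp_get_value(hw, "retdata", retdata, sizeof(retdata),
 *                                 resp_virt, resp_len);
 * if (len > 0) {
 *         // retdata now holds the NUL-terminated string "ELX_4x10G"
 * }
 * @endcode
 */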
6859
6860 /**
6861 * @brief Cause chip to enter an unrecoverable error state.
6862 *
6863 * @par Description
6864 * Cause chip to enter an unrecoverable error state. This is
6865 * used when detecting unexpected FW behavior so that the FW can be
* halted from the driver as soon as the error is detected.
6867 *
6868 * @param hw Hardware context.
6869 * @param dump Generate dump as part of reset.
6870 *
6871 * @return Returns 0 on success, or a non-zero value on failure.
6872 *
6873 */
6874 ocs_hw_rtn_e
6875 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6876 {
6877 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6878
6879 if (sli_raise_ue(&hw->sli, dump) != 0) {
6880 rc = OCS_HW_RTN_ERROR;
6881 } else {
6882 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6883 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6884 }
6885 }
6886
6887 return rc;
6888 }
6889
6890 /**
6891 * @brief Called when the OBJECT_GET command completes.
6892 *
6893 * @par Description
6894 * Get the number of bytes actually written out of the response, free the mailbox
6895 * that was malloc'd by ocs_hw_dump_get(), then call the callback
6896 * and pass the status and bytes read.
6897 *
6898 * @param hw Hardware context.
6899 * @param status Status field from the mbox completion.
6900 * @param mqe Mailbox response structure.
6901 * @param arg Pointer to a callback function that signals the caller that the command is done.
6902 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6903 *
6904 * @return Returns 0.
6905 */
6906 static int32_t
6907 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6908 {
6909 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6910 sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6911 ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6912 uint32_t bytes_read;
6913 uint8_t eof;
6914
6915 bytes_read = rd_obj_rsp->actual_read_length;
6916 eof = rd_obj_rsp->eof;
6917
6918 if (cb_arg) {
6919 if (cb_arg->cb) {
6920 if ((status == 0) && mbox_rsp->hdr.status) {
6921 status = mbox_rsp->hdr.status;
6922 }
6923 cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6924 }
6925
6926 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6927 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6928 }
6929
6930 return 0;
6931 }
6932
6933 /**
6934 * @brief Read a dump image to the host.
6935 *
6936 * @par Description
6937 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6938 * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6939 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6940 * and signal the caller that the read has completed.
6941 *
6942 * @param hw Hardware context.
6943 * @param dma DMA structure to transfer the dump chunk into.
6944 * @param size Size of the dump chunk.
6945 * @param offset Offset, in bytes, from the beginning of the dump.
6946 * @param cb Pointer to a callback function that is called when the command completes.
6947 * The callback function prototype is
6948 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6949 * @param arg Pointer to be passed to the callback function.
6950 *
6951 * @return Returns 0 on success, or a non-zero value on failure.
6952 */
6953 ocs_hw_rtn_e
6954 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6955 {
6956 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6957 uint8_t *mbxdata;
6958 ocs_hw_dump_get_cb_arg_t *cb_arg;
6959 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6960
6961 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6962 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6963 return OCS_HW_RTN_ERROR;
6964 }
6965
6966 if (1 != sli_dump_is_present(&hw->sli)) {
6967 ocs_log_test(hw->os, "No dump is present\n");
6968 return OCS_HW_RTN_ERROR;
6969 }
6970
6971 if (1 == sli_reset_required(&hw->sli)) {
6972 ocs_log_test(hw->os, "device reset required\n");
6973 return OCS_HW_RTN_ERROR;
6974 }
6975
6976 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6977 if (mbxdata == NULL) {
6978 ocs_log_err(hw->os, "failed to malloc mbox\n");
6979 return OCS_HW_RTN_NO_MEMORY;
6980 }
6981
6982 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
6983 if (cb_arg == NULL) {
6984 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6985 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6986 return OCS_HW_RTN_NO_MEMORY;
6987 }
6988
6989 cb_arg->cb = cb;
6990 cb_arg->arg = arg;
6991 cb_arg->mbox_cmd = mbxdata;
6992
6993 if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6994 size, offset, "/dbg/dump.bin", dma)) {
6995 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
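		/*
		 * In polled mode ocs_hw_command() completes the mailbox before
		 * returning and does not invoke the callback, so copy the
		 * response out of the bounce mailbox and call the completion
		 * handler directly.
		 */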
6996 if (rc == 0 && opts == OCS_CMD_POLL) {
6997 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6998 rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
6999 }
7000 }
7001
7002 if (rc != OCS_HW_RTN_SUCCESS) {
7003 ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7004 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7005 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7006 }
7007
7008 return rc;
7009 }
7010
7011 /**
7012 * @brief Called when the OBJECT_DELETE command completes.
7013 *
7014 * @par Description
7015 * Free the mailbox that was malloc'd
7016 * by ocs_hw_dump_clear(), then call the callback and pass the status.
7017 *
7018 * @param hw Hardware context.
7019 * @param status Status field from the mbox completion.
7020 * @param mqe Mailbox response structure.
 * @param arg Pointer to the callback argument structure, which carries the caller's callback.
 * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7023 *
7024 * @return Returns 0.
7025 */
7026 static int32_t
7027 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7028 {
7029 ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7030 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7031
7032 if (cb_arg) {
7033 if (cb_arg->cb) {
7034 if ((status == 0) && mbox_rsp->hdr.status) {
7035 status = mbox_rsp->hdr.status;
7036 }
7037 cb_arg->cb(status, cb_arg->arg);
7038 }
7039
7040 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7041 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7042 }
7043
7044 return 0;
7045 }
7046
7047 /**
7048 * @brief Clear a dump image from the device.
7049 *
7050 * @par Description
7051 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7052 * the dump, then sends the command with ocs_hw_command(). On completion,
7053 * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7054 * and to signal the caller that the write has completed.
7055 *
7056 * @param hw Hardware context.
7057 * @param cb Pointer to a callback function that is called when the command completes.
7058 * The callback function prototype is
 * <tt>void cb(int32_t status, void *arg)</tt>.
7060 * @param arg Pointer to be passed to the callback function.
7061 *
7062 * @return Returns 0 on success, or a non-zero value on failure.
7063 */
7064 ocs_hw_rtn_e
7065 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7066 {
7067 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7068 uint8_t *mbxdata;
7069 ocs_hw_dump_clear_cb_arg_t *cb_arg;
7070 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7071
7072 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7073 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7074 return OCS_HW_RTN_ERROR;
7075 }
7076
7077 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7078 if (mbxdata == NULL) {
7079 ocs_log_err(hw->os, "failed to malloc mbox\n");
7080 return OCS_HW_RTN_NO_MEMORY;
7081 }
7082
7083 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7084 if (cb_arg == NULL) {
7085 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7086 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7087 return OCS_HW_RTN_NO_MEMORY;
7088 }
7089
7090 cb_arg->cb = cb;
7091 cb_arg->arg = arg;
7092 cb_arg->mbox_cmd = mbxdata;
7093
7094 if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7095 "/dbg/dump.bin")) {
7096 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7097 if (rc == 0 && opts == OCS_CMD_POLL) {
7098 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7099 rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7100 }
7101 }
7102
7103 if (rc != OCS_HW_RTN_SUCCESS) {
7104 ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7105 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7106 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7107 }
7108
7109 return rc;
7110 }
7111
7112 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7113 ocs_get_port_protocol_cb_t cb;
7114 void *arg;
7115 uint32_t pci_func;
7116 ocs_dma_t payload;
7117 } ocs_hw_get_port_protocol_cb_arg_t;
7118
7119 /**
7120 * @brief Called for the completion of get_port_profile for a
7121 * user request.
7122 *
7123 * @param hw Hardware context.
7124 * @param status The status from the MQE.
7125 * @param mqe Pointer to mailbox command buffer.
7126 * @param arg Pointer to a callback argument.
7127 *
7128 * @return Returns 0 on success, or a non-zero value on failure.
7129 */
7130 static int32_t
7131 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7132 uint8_t *mqe, void *arg)
7133 {
7134 ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7135 ocs_dma_t *payload = &(cb_arg->payload);
7136 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7137 ocs_hw_port_protocol_e port_protocol;
7138 int num_descriptors;
7139 sli4_resource_descriptor_v1_t *desc_p;
7140 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7141 int i;
7142
7143 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7144
7145 num_descriptors = response->desc_count;
7146 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7147 for (i=0; i<num_descriptors; i++) {
7148 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7149 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7150 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7151 switch(pcie_desc_p->pf_type) {
7152 case 0x02:
7153 port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7154 break;
7155 case 0x04:
7156 port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7157 break;
7158 case 0x10:
7159 port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7160 break;
7161 default:
7162 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7163 break;
7164 }
7165 }
7166 }
7167
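		/* descriptors are variable length; advance by this
		 * descriptor's own length to reach the next one */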
7168 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7169 }
7170
7171 if (cb_arg->cb) {
7172 cb_arg->cb(status, port_protocol, cb_arg->arg);
7173 }
7174
7175 ocs_dma_free(hw->os, &cb_arg->payload);
7176 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7177 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7178
7179 return 0;
7180 }
7181
7182 /**
7183 * @ingroup io
7184 * @brief Get the current port protocol.
7185 * @par Description
7186 * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the
7187 * command completes the provided mgmt callback function is
7188 * called.
7189 *
7190 * @param hw Hardware context.
7191 * @param pci_func PCI function to query for current protocol.
7192 * @param cb Callback function to be called when the command completes.
7193 * @param ul_arg An argument that is passed to the callback function.
7194 *
7195 * @return
7196 * - OCS_HW_RTN_SUCCESS on success.
7197 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7198 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7199 * context.
7200 * - OCS_HW_RTN_ERROR on any other error.
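 *
 * @par Example
 * A minimal usage sketch; the callback name is hypothetical, and its
 * signature mirrors the invocation in ocs_hw_get_port_protocol_cb():
 *
 * @code
 * static void
 * port_protocol_done(int32_t status, ocs_hw_port_protocol_e port_protocol, void *arg)
 * {
 *	if (status == 0 && port_protocol == OCS_HW_PORT_PROTOCOL_FC) {
 *		// The PCI function is currently configured for FC.
 *	}
 * }
 *
 * rc = ocs_hw_get_port_protocol(hw, pci_func, port_protocol_done, NULL);
 * @endcode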
7201 */
7202 ocs_hw_rtn_e
7203 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7204 ocs_get_port_protocol_cb_t cb, void* ul_arg)
7205 {
7206 uint8_t *mbxdata;
7207 ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7209
7210 /* Only supported on Skyhawk */
7211 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7212 return OCS_HW_RTN_ERROR;
7213 }
7214
7215 /* mbxdata holds the header of the command */
7216 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7217 if (mbxdata == NULL) {
7218 ocs_log_err(hw->os, "failed to malloc mbox\n");
7219 return OCS_HW_RTN_NO_MEMORY;
7220 }
7221
7222 /* cb_arg holds the data that will be passed to the callback on completion */
7223 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7224 if (cb_arg == NULL) {
7225 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7226 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7227 return OCS_HW_RTN_NO_MEMORY;
7228 }
7229
7230 cb_arg->cb = cb;
7231 cb_arg->arg = ul_arg;
7232 cb_arg->pci_func = pci_func;
7233
7234 /* dma_mem holds the non-embedded portion */
7235 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7236 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7237 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7238 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7239 return OCS_HW_RTN_NO_MEMORY;
7240 }
7241
7242 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7243 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7244 }
7245
7246 if (rc != OCS_HW_RTN_SUCCESS) {
7247 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7251 }
7252
	return rc;
}
7256
7257 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7258 ocs_set_port_protocol_cb_t cb;
7259 void *arg;
7260 ocs_dma_t payload;
7261 uint32_t new_protocol;
7262 uint32_t pci_func;
7263 } ocs_hw_set_port_protocol_cb_arg_t;
7264
7265 /**
7266 * @brief Called for the completion of set_port_profile for a
7267 * user request.
7268 *
7269 * @par Description
7270 * This is the second of two callbacks for the set_port_protocol
7271 * function. The set operation is a read-modify-write. This
7272 * callback is called when the write (SET_PROFILE_CONFIG)
7273 * completes.
7274 *
7275 * @param hw Hardware context.
7276 * @param status The status from the MQE.
7277 * @param mqe Pointer to mailbox command buffer.
7278 * @param arg Pointer to a callback argument.
7279 *
7280 * @return 0 on success, non-zero otherwise
7281 */
7282 static int32_t
7283 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7284 {
7285 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7286
7287 if (cb_arg->cb) {
		cb_arg->cb(status, cb_arg->arg);
7289 }
7290
7291 ocs_dma_free(hw->os, &(cb_arg->payload));
7292 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7293 ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7294
7295 return 0;
7296 }
7297
7298 /**
7299 * @brief Called for the completion of set_port_profile for a
7300 * user request.
7301 *
7302 * @par Description
7303 * This is the first of two callbacks for the set_port_protocol
7304 * function. The set operation is a read-modify-write. This
 * callback is called when the read (GET_PROFILE_CONFIG)
 * completes. It updates the resource descriptors, then queues
 * the write (SET_PROFILE_CONFIG).
7308 *
 * On entry there are three memory areas that were allocated by
 * ocs_hw_set_port_protocol(). If a failure is detected in this
 * function, those must be freed. If this function succeeds, it
 * allocates three more areas, which are freed by ocs_hw_set_port_protocol_cb2().
7313 *
7314 * @param hw Hardware context.
7315 * @param status The status from the MQE
7316 * @param mqe Pointer to mailbox command buffer.
7317 * @param arg Pointer to a callback argument.
7318 *
7319 * @return Returns 0 on success, or a non-zero value otherwise.
7320 */
7321 static int32_t
7322 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7323 {
7324 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7325 ocs_dma_t *payload = &(cb_arg->payload);
7326 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7327 int num_descriptors;
7328 sli4_resource_descriptor_v1_t *desc_p;
7329 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7330 int i;
7331 ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7332 ocs_hw_port_protocol_e new_protocol;
7333 uint8_t *dst;
7334 sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7335 uint8_t *mbxdata;
7336 int pci_descriptor_count;
7337 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7338 int num_fcoe_ports = 0;
7339 int num_iscsi_ports = 0;
7340
7341 new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7342
7343 num_descriptors = response->desc_count;
7344
7345 /* Count PCI descriptors */
7346 pci_descriptor_count = 0;
7347 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7348 for (i=0; i<num_descriptors; i++) {
7349 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7350 ++pci_descriptor_count;
7351 }
7352 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7353 }
7354
	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		/* free the areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
	if (new_cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		/* free the areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	new_cb_arg->cb = cb_arg->cb;
	new_cb_arg->arg = cb_arg->arg;

	/* Allocate memory for the descriptors we're going to send. This is
	 * one for each PCI descriptor plus one ISAP descriptor. */
	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
			  (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
			  sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		/* free the areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}
7383
7384 sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7385 &new_cb_arg->payload,
7386 0, pci_descriptor_count+1, 1);
7387
7388 /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7389 dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7390
7391 /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
7392 * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor
7393 * that we're trying to change also set its pf_type.
7394 */
7395 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7396 for (i=0; i<num_descriptors; i++) {
7397 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7398 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7399 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7400 /* This is the PCIe descriptor for this OCS instance.
7401 * Update it with the new pf_type */
7402 switch(new_protocol) {
7403 case OCS_HW_PORT_PROTOCOL_FC:
7404 pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7405 break;
7406 case OCS_HW_PORT_PROTOCOL_FCOE:
7407 pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7408 break;
7409 case OCS_HW_PORT_PROTOCOL_ISCSI:
7410 pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7411 break;
7412 default:
7413 pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7414 break;
7415 }
7416 }
7417
7418 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7419 ++num_fcoe_ports;
7420 }
7421 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7422 ++num_iscsi_ports;
7423 }
7424 ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7425 dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7426 }
7427
7428 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7429 }
7430
7431 /* Create an ISAP resource descriptor */
7432 isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7433 isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7434 isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7435 if (num_iscsi_ports > 0) {
7436 isap_desc_p->iscsi_tgt = 1;
7437 isap_desc_p->iscsi_ini = 1;
7438 isap_desc_p->iscsi_dif = 1;
7439 }
7440 if (num_fcoe_ports > 0) {
7441 isap_desc_p->fcoe_tgt = 1;
7442 isap_desc_p->fcoe_ini = 1;
7443 isap_desc_p->fcoe_dif = 1;
7444 }
7445
	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol() */
7447 ocs_dma_free(hw->os, &cb_arg->payload);
7448 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7449 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7450
7451 /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7452 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7453 if (rc) {
7454 ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7455 /* Call the upper level callback to report a failure */
7456 if (new_cb_arg->cb) {
			new_cb_arg->cb(rc, new_cb_arg->arg);
7458 }
7459
7460 /* Free the memory allocated by this function */
7461 ocs_dma_free(hw->os, &new_cb_arg->payload);
7462 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7463 ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7464 }
7465
7466 return rc;
7467 }
7468
7469 /**
7470 * @ingroup io
7471 * @brief Set the port protocol.
7472 * @par Description
7473 * Setting the port protocol is a read-modify-write operation.
7474 * This function submits a GET_PROFILE_CONFIG command to read
7475 * the current settings. The callback function will modify the
7476 * settings and issue the write.
7477 *
7478 * On successful completion this function will have allocated
7479 * two regular memory areas and one dma area which will need to
7480 * get freed later in the callbacks.
7481 *
7482 * @param hw Hardware context.
7483 * @param new_protocol New protocol to use.
7484 * @param pci_func PCI function to configure.
7485 * @param cb Callback function to be called when the command completes.
7486 * @param ul_arg An argument that is passed to the callback function.
7487 *
7488 * @return
7489 * - OCS_HW_RTN_SUCCESS on success.
7490 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7491 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7492 * context.
7493 * - OCS_HW_RTN_ERROR on any other error.
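 *
 * @par Example
 * A minimal usage sketch; the callback name is hypothetical and is invoked
 * from ocs_hw_set_port_protocol_cb2() once the final SET_PROFILE_CONFIG
 * write completes:
 *
 * @code
 * static void
 * set_protocol_done(int32_t status, void *arg)
 * {
 *	// A zero status means the read-modify-write sequence completed.
 * }
 *
 * rc = ocs_hw_set_port_protocol(hw, OCS_HW_PORT_PROTOCOL_FC, pci_func,
 *				 set_protocol_done, NULL);
 * @endcode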
7494 */
7495 ocs_hw_rtn_e
7496 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7497 uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7498 {
7499 uint8_t *mbxdata;
7500 ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7501 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7502
7503 /* Only supported on Skyhawk */
7504 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7505 return OCS_HW_RTN_ERROR;
7506 }
7507
7508 /* mbxdata holds the header of the command */
7509 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7510 if (mbxdata == NULL) {
7511 ocs_log_err(hw->os, "failed to malloc mbox\n");
7512 return OCS_HW_RTN_NO_MEMORY;
7513 }
7514
7515 /* cb_arg holds the data that will be passed to the callback on completion */
7516 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7517 if (cb_arg == NULL) {
7518 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7519 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7520 return OCS_HW_RTN_NO_MEMORY;
7521 }
7522
7523 cb_arg->cb = cb;
7524 cb_arg->arg = ul_arg;
7525 cb_arg->new_protocol = new_protocol;
7526 cb_arg->pci_func = pci_func;
7527
7528 /* dma_mem holds the non-embedded portion */
7529 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7530 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7531 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7533 return OCS_HW_RTN_NO_MEMORY;
7534 }
7535
7536 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7537 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7538 }
7539
7540 if (rc != OCS_HW_RTN_SUCCESS) {
7541 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7545 }
7546
7547 return rc;
7548 }
7549
7550 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7551 ocs_get_profile_list_cb_t cb;
7552 void *arg;
7553 ocs_dma_t payload;
7554 } ocs_hw_get_profile_list_cb_arg_t;
7555
7556 /**
7557 * @brief Called for the completion of get_profile_list for a
7558 * user request.
7559 * @par Description
 * This function is called when the COMMON_GET_PROFILE_LIST
 * mailbox completes. The response is in the non-embedded
 * payload (cb_arg->payload.virt). This function parses the
 * response and creates an ocs_hw_profile_list_t, then calls the
7564 * mgmt_cb callback function and passes that list to it.
7565 *
7566 * @param hw Hardware context.
7567 * @param status The status from the MQE
7568 * @param mqe Pointer to mailbox command buffer.
7569 * @param arg Pointer to a callback argument.
7570 *
7571 * @return Returns 0 on success, or a non-zero value on failure.
7572 */
7573 static int32_t
7574 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7575 {
7576 ocs_hw_profile_list_t *list;
7577 ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7578 ocs_dma_t *payload = &(cb_arg->payload);
7579 sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7580 int i;
7581 int num_descriptors;
7582
	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO | OCS_M_NOWAIT);
	if (list == NULL) {
		ocs_log_err(hw->os, "failed to malloc list\n");
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* clamp to the size of the descriptors array so that callers can
	 * safely iterate list->num_descriptors entries */
	list->num_descriptors = response->profile_descriptor_count;
	if (list->num_descriptors > OCS_HW_MAX_PROFILES) {
		list->num_descriptors = OCS_HW_MAX_PROFILES;
	}
	num_descriptors = list->num_descriptors;
7595
7596 for (i=0; i<num_descriptors; i++) {
7597 list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7598 list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7599 ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7600 }
7601
7602 if (cb_arg->cb) {
7603 cb_arg->cb(status, list, cb_arg->arg);
7604 } else {
7605 ocs_free(hw->os, list, sizeof(*list));
7606 }
7607
7608 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7609 ocs_dma_free(hw->os, &cb_arg->payload);
7610 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7611
7612 return 0;
7613 }
7614
7615 /**
7616 * @ingroup io
7617 * @brief Get a list of available profiles.
7618 * @par Description
7619 * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
7620 * command completes the provided mgmt callback function is
7621 * called.
7622 *
7623 * @param hw Hardware context.
7624 * @param cb Callback function to be called when the
7625 * command completes.
7626 * @param ul_arg An argument that is passed to the callback
7627 * function.
7628 *
7629 * @return
7630 * - OCS_HW_RTN_SUCCESS on success.
7631 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7632 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7633 * context.
7634 * - OCS_HW_RTN_ERROR on any other error.
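 *
 * @par Example
 * A minimal usage sketch; the callback name is hypothetical. When a
 * callback is supplied, the completion path hands the allocated
 * ocs_hw_profile_list_t to it instead of freeing it, so the callback
 * takes ownership of the list:
 *
 * @code
 * static void
 * profile_list_done(int32_t status, ocs_hw_profile_list_t *list, void *arg)
 * {
 *	ocs_hw_t *hw = arg;
 *	uint32_t i;
 *
 *	for (i = 0; i < list->num_descriptors; i++) {
 *		// Inspect list->descriptors[i].profile_id, etc.
 *	}
 *	ocs_free(hw->os, list, sizeof(*list));
 * }
 *
 * rc = ocs_hw_get_profile_list(hw, profile_list_done, hw);
 * @endcode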
7635 */
7636 ocs_hw_rtn_e
7637 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7638 {
7639 uint8_t *mbxdata;
7640 ocs_hw_get_profile_list_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7642
7643 /* Only supported on Skyhawk */
7644 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7645 return OCS_HW_RTN_ERROR;
7646 }
7647
7648 /* mbxdata holds the header of the command */
7649 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7650 if (mbxdata == NULL) {
7651 ocs_log_err(hw->os, "failed to malloc mbox\n");
7652 return OCS_HW_RTN_NO_MEMORY;
7653 }
7654
7655 /* cb_arg holds the data that will be passed to the callback on completion */
7656 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7657 if (cb_arg == NULL) {
7658 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7659 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7660 return OCS_HW_RTN_NO_MEMORY;
7661 }
7662
7663 cb_arg->cb = cb;
7664 cb_arg->arg = ul_arg;
7665
7666 /* dma_mem holds the non-embedded portion */
7667 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7668 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7669 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7670 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7671 return OCS_HW_RTN_NO_MEMORY;
7672 }
7673
7674 if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7675 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7676 }
7677
7678 if (rc != OCS_HW_RTN_SUCCESS) {
7679 ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7680 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7681 ocs_dma_free(hw->os, &cb_arg->payload);
7682 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7683 }
7684
7685 return rc;
7686 }
7687
7688 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7689 ocs_get_active_profile_cb_t cb;
7690 void *arg;
7691 } ocs_hw_get_active_profile_cb_arg_t;
7692
7693 /**
7694 * @brief Called for the completion of get_active_profile for a
7695 * user request.
7696 *
7697 * @param hw Hardware context.
7698 * @param status The status from the MQE
7699 * @param mqe Pointer to mailbox command buffer.
7700 * @param arg Pointer to a callback argument.
7701 *
7702 * @return Returns 0 on success, or a non-zero value on failure.
7703 */
7704 static int32_t
7705 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7706 {
7707 ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7708 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7709 sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7710 uint32_t active_profile;
7711
7712 active_profile = response->active_profile_id;
7713
7714 if (cb_arg->cb) {
7715 cb_arg->cb(status, active_profile, cb_arg->arg);
7716 }
7717
7718 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7719 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7720
7721 return 0;
7722 }
7723
7724 /**
7725 * @ingroup io
7726 * @brief Get the currently active profile.
7727 * @par Description
7728 * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7729 * command completes the provided mgmt callback function is
7730 * called.
7731 *
7732 * @param hw Hardware context.
7733 * @param cb Callback function to be called when the
7734 * command completes.
7735 * @param ul_arg An argument that is passed to the callback
7736 * function.
7737 *
7738 * @return
7739 * - OCS_HW_RTN_SUCCESS on success.
7740 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7741 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7742 * context.
7743 * - OCS_HW_RTN_ERROR on any other error.
7744 */
7745 int32_t
7746 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7747 {
7748 uint8_t *mbxdata;
7749 ocs_hw_get_active_profile_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7751
7752 /* Only supported on Skyhawk */
7753 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7754 return OCS_HW_RTN_ERROR;
7755 }
7756
7757 /* mbxdata holds the header of the command */
7758 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7759 if (mbxdata == NULL) {
7760 ocs_log_err(hw->os, "failed to malloc mbox\n");
7761 return OCS_HW_RTN_NO_MEMORY;
7762 }
7763
7764 /* cb_arg holds the data that will be passed to the callback on completion */
7765 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7766 if (cb_arg == NULL) {
7767 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7768 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7769 return OCS_HW_RTN_NO_MEMORY;
7770 }
7771
7772 cb_arg->cb = cb;
7773 cb_arg->arg = ul_arg;
7774
7775 if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7776 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7777 }
7778
7779 if (rc != OCS_HW_RTN_SUCCESS) {
7780 ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7781 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7782 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7783 }
7784
7785 return rc;
7786 }
7787
7788 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7789 ocs_get_nvparms_cb_t cb;
7790 void *arg;
7791 } ocs_hw_get_nvparms_cb_arg_t;
7792
7793 /**
7794 * @brief Called for the completion of get_nvparms for a
7795 * user request.
7796 *
7797 * @param hw Hardware context.
7798 * @param status The status from the MQE.
7799 * @param mqe Pointer to mailbox command buffer.
7800 * @param arg Pointer to a callback argument.
7801 *
7802 * @return 0 on success, non-zero otherwise
7803 */
7804 static int32_t
7805 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7806 {
7807 ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7808 sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7809
7810 if (cb_arg->cb) {
7811 cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7812 mbox_rsp->preferred_d_id, cb_arg->arg);
7813 }
7814
7815 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7816 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7817
7818 return 0;
7819 }
7820
7821 /**
7822 * @ingroup io
7823 * @brief Read non-volatile parms.
7824 * @par Description
7825 * Issues a SLI-4 READ_NVPARMS mailbox. When the
7826 * command completes the provided mgmt callback function is
7827 * called.
7828 *
7829 * @param hw Hardware context.
7830 * @param cb Callback function to be called when the
7831 * command completes.
7832 * @param ul_arg An argument that is passed to the callback
7833 * function.
7834 *
7835 * @return
7836 * - OCS_HW_RTN_SUCCESS on success.
7837 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7838 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7839 * context.
7840 * - OCS_HW_RTN_ERROR on any other error.
7841 */
7842 int32_t
7843 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7844 {
7845 uint8_t *mbxdata;
7846 ocs_hw_get_nvparms_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7848
7849 /* mbxdata holds the header of the command */
7850 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7851 if (mbxdata == NULL) {
7852 ocs_log_err(hw->os, "failed to malloc mbox\n");
7853 return OCS_HW_RTN_NO_MEMORY;
7854 }
7855
7856 /* cb_arg holds the data that will be passed to the callback on completion */
7857 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7858 if (cb_arg == NULL) {
7859 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7860 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7861 return OCS_HW_RTN_NO_MEMORY;
7862 }
7863
7864 cb_arg->cb = cb;
7865 cb_arg->arg = ul_arg;
7866
7867 if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7868 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7869 }
7870
7871 if (rc != OCS_HW_RTN_SUCCESS) {
7872 ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7873 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7874 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7875 }
7876
7877 return rc;
7878 }
7879
7880 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7881 ocs_set_nvparms_cb_t cb;
7882 void *arg;
7883 } ocs_hw_set_nvparms_cb_arg_t;
7884
7885 /**
7886 * @brief Called for the completion of set_nvparms for a
7887 * user request.
7888 *
7889 * @param hw Hardware context.
7890 * @param status The status from the MQE.
7891 * @param mqe Pointer to mailbox command buffer.
7892 * @param arg Pointer to a callback argument.
7893 *
7894 * @return Returns 0 on success, or a non-zero value on failure.
7895 */
7896 static int32_t
7897 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7898 {
7899 ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7900
7901 if (cb_arg->cb) {
7902 cb_arg->cb(status, cb_arg->arg);
7903 }
7904
7905 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7906 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7907
7908 return 0;
7909 }
7910
7911 /**
7912 * @ingroup io
7913 * @brief Write non-volatile parms.
7914 * @par Description
7915 * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7916 * command completes the provided mgmt callback function is
7917 * called.
7918 *
7919 * @param hw Hardware context.
7920 * @param cb Callback function to be called when the
7921 * command completes.
7922 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7923 * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7924 * @param hard_alpa A hard AL_PA address setting used during loop
7925 * initialization. If no hard AL_PA is required, set to 0.
7926 * @param preferred_d_id A preferred D_ID address setting
7927 * that may be overridden with the CONFIG_LINK mailbox command.
7928 * If there is no preference, set to 0.
7929 * @param ul_arg An argument that is passed to the callback
7930 * function.
7931 *
7932 * @return
7933 * - OCS_HW_RTN_SUCCESS on success.
7934 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7935 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7936 * context.
7937 * - OCS_HW_RTN_ERROR on any other error.
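 *
 * @par Example
 * A minimal usage sketch; the callback name and the WWN values are
 * hypothetical, and both names are given in big-endian order as noted
 * above:
 *
 * @code
 * static void
 * nvparms_set_done(int32_t status, void *arg)
 * {
 *	// A zero status means the parameters were written.
 * }
 *
 * uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 * uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 *
 * // No hard AL_PA and no preferred D_ID.
 * rc = ocs_hw_set_nvparms(hw, nvparms_set_done, wwpn, wwnn, 0, 0, NULL);
 * @endcode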
7938 */
7939 int32_t
7940 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7941 uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7942 {
7943 uint8_t *mbxdata;
7944 ocs_hw_set_nvparms_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7946
7947 /* mbxdata holds the header of the command */
7948 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7949 if (mbxdata == NULL) {
7950 ocs_log_err(hw->os, "failed to malloc mbox\n");
7951 return OCS_HW_RTN_NO_MEMORY;
7952 }
7953
7954 /* cb_arg holds the data that will be passed to the callback on completion */
7955 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7956 if (cb_arg == NULL) {
7957 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7958 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7959 return OCS_HW_RTN_NO_MEMORY;
7960 }
7961
7962 cb_arg->cb = cb;
7963 cb_arg->arg = ul_arg;
7964
7965 if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7966 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7967 }
7968
7969 if (rc != OCS_HW_RTN_SUCCESS) {
7970 ocs_log_test(hw->os, "SET_NVPARMS failed\n");
7971 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7972 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7973 }
7974
7975 return rc;
7976 }
7977
7978 /**
7979 * @brief Called to obtain the count for the specified type.
7980 *
7981 * @param hw Hardware context.
7982 * @param io_count_type IO count type (inuse, free, wait_free).
7983 *
7984 * @return Returns the number of IOs on the specified list type.
7985 */
7986 uint32_t
7987 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
7988 {
7989 ocs_hw_io_t *io = NULL;
7990 uint32_t count = 0;
7991
7992 ocs_lock(&hw->io_lock);
7993
7994 switch (io_count_type) {
7995 case OCS_HW_IO_INUSE_COUNT :
7996 ocs_list_foreach(&hw->io_inuse, io) {
7997 count++;
7998 }
7999 break;
8000 case OCS_HW_IO_FREE_COUNT :
8001 ocs_list_foreach(&hw->io_free, io) {
8002 count++;
8003 }
8004 break;
8005 case OCS_HW_IO_WAIT_FREE_COUNT :
8006 ocs_list_foreach(&hw->io_wait_free, io) {
8007 count++;
8008 }
8009 break;
8010 case OCS_HW_IO_PORT_OWNED_COUNT:
8011 ocs_list_foreach(&hw->io_port_owned, io) {
8012 count++;
8013 }
8014 break;
8015 case OCS_HW_IO_N_TOTAL_IO_COUNT :
8016 count = hw->config.n_io;
8017 break;
8018 }
8019
8020 ocs_unlock(&hw->io_lock);
8021
8022 return count;
8023 }
8024
8025 /**
8026 * @brief Called to obtain the count of produced RQs.
8027 *
8028 * @param hw Hardware context.
8029 *
8030 * @return Returns the number of RQs produced.
8031 */
8032 uint32_t
8033 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8034 {
8035 uint32_t count = 0;
8036 uint32_t i;
8037 uint32_t j;
8038
8039 for (i = 0; i < hw->hw_rq_count; i++) {
8040 hw_rq_t *rq = hw->hw_rq[i];
8041 if (rq->rq_tracker != NULL) {
8042 for (j = 0; j < rq->entry_count; j++) {
8043 if (rq->rq_tracker[j] != NULL) {
8044 count++;
8045 }
8046 }
8047 }
8048 }
8049
8050 return count;
8051 }
8052
8053 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8054 ocs_set_active_profile_cb_t cb;
8055 void *arg;
8056 } ocs_hw_set_active_profile_cb_arg_t;
8057
8058 /**
8059 * @brief Called for the completion of set_active_profile for a
8060 * user request.
8061 *
8062 * @param hw Hardware context.
8063 * @param status The status from the MQE
8064 * @param mqe Pointer to mailbox command buffer.
8065 * @param arg Pointer to a callback argument.
8066 *
8067 * @return Returns 0 on success, or a non-zero value on failure.
8068 */
8069 static int32_t
8070 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8071 {
8072 ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8073
8074 if (cb_arg->cb) {
8075 cb_arg->cb(status, cb_arg->arg);
8076 }
8077
8078 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8080
8081 return 0;
8082 }
8083
8084 /**
8085 * @ingroup io
8086 * @brief Set the currently active profile.
8087 * @par Description
 * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8089 * command completes the provided mgmt callback function is
8090 * called.
8091 *
8092 * @param hw Hardware context.
8093 * @param profile_id Profile ID to activate.
8094 * @param cb Callback function to be called when the command completes.
8095 * @param ul_arg An argument that is passed to the callback function.
8096 *
8097 * @return
8098 * - OCS_HW_RTN_SUCCESS on success.
8099 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8100 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8101 * context.
8102 * - OCS_HW_RTN_ERROR on any other error.
8103 */
8104 int32_t
8105 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8106 {
8107 uint8_t *mbxdata;
8108 ocs_hw_set_active_profile_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
8110
8111 /* Only supported on Skyhawk */
8112 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8113 return OCS_HW_RTN_ERROR;
8114 }
8115
8116 /* mbxdata holds the header of the command */
8117 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8118 if (mbxdata == NULL) {
8119 ocs_log_err(hw->os, "failed to malloc mbox\n");
8120 return OCS_HW_RTN_NO_MEMORY;
8121 }
8122
8123 /* cb_arg holds the data that will be passed to the callback on completion */
8124 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8125 if (cb_arg == NULL) {
8126 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8127 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8128 return OCS_HW_RTN_NO_MEMORY;
8129 }
8130
8131 cb_arg->cb = cb;
8132 cb_arg->arg = ul_arg;
8133
8134 if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8135 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8136 }
8137
8138 if (rc != OCS_HW_RTN_SUCCESS) {
8139 ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8140 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8141 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8142 }
8143
8144 return rc;
8145 }
8146
8147 /*
8148 * Private functions
8149 */
8150
8151 /**
8152 * @brief Update the queue hash with the ID and index.
8153 *
8154 * @param hash Pointer to hash table.
8155 * @param id ID that was created.
8156 * @param index The index into the hash object.
8157 */
8158 static void
8159 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8160 {
8161 uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8162
	/*
	 * Since the hash is always bigger than the number of queues, we
	 * never have to worry about an infinite loop.
	 */
8167 while(hash[hash_index].in_use) {
8168 hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8169 }
8170
8171 /* not used, claim the entry */
8172 hash[hash_index].id = id;
8173 hash[hash_index].in_use = 1;
8174 hash[hash_index].index = index;
8175 }
8176
8177 /**
8178 * @brief Find index given queue ID.
8179 *
8180 * @param hash Pointer to hash table.
8181 * @param id ID to find.
8182 *
8183 * @return Returns the index into the HW cq array or -1 if not found.
8184 */
8185 int32_t
8186 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8187 {
8188 int32_t rc = -1;
8189 int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1);
8190
	/*
	 * Since the hash is always bigger than the maximum number of queues, we
	 * never have to worry about an infinite loop. We will always find an
	 * unused entry.
	 */
8196 do {
8197 if (hash[index].in_use &&
8198 hash[index].id == id) {
8199 rc = hash[index].index;
8200 } else {
8201 index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8202 }
8203 } while(rc == -1 && hash[index].in_use);
8204
8205 return rc;
8206 }
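
/*
 * Example of the probing behavior (hypothetical IDs and indices): because
 * the table size is a power of two, the low-order bits of the ID select the
 * starting slot, and colliding IDs fall into the next free slot:
 *
 *	ocs_hw_queue_hash_add(hash, id, 3);
 *	ocs_hw_queue_hash_add(hash, id + OCS_HW_Q_HASH_SIZE, 7);  // same start slot, probes forward
 *	ocs_hw_queue_hash_find(hash, id + OCS_HW_Q_HASH_SIZE);    // walks the probe chain, returns 7
 */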
8207
8208 static int32_t
8209 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8210 {
8211 int32_t rc = OCS_HW_RTN_ERROR;
8212 uint16_t fcfi = UINT16_MAX;
8213
8214 if ((hw == NULL) || (domain == NULL)) {
8215 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8216 hw, domain);
8217 return OCS_HW_RTN_ERROR;
8218 }
8219
8220 fcfi = domain->fcf_indicator;
8221
8222 if (fcfi < SLI4_MAX_FCFI) {
8223 uint16_t fcf_index = UINT16_MAX;
8224
8225 ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8226 domain, fcfi);
8227 hw->domains[fcfi] = domain;
8228
8229 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8230 if (hw->workaround.override_fcfi) {
8231 if (hw->first_domain_idx < 0) {
8232 hw->first_domain_idx = fcfi;
8233 }
8234 }
8235
8236 fcf_index = domain->fcf;
8237
8238 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8239 ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8240 fcf_index, fcfi);
8241 hw->fcf_index_fcfi[fcf_index] = fcfi;
8242 rc = OCS_HW_RTN_SUCCESS;
8243 } else {
8244 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8245 fcf_index, SLI4_MAX_FCF_INDEX);
8246 hw->domains[fcfi] = NULL;
8247 }
8248 } else {
8249 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8250 fcfi, SLI4_MAX_FCFI);
8251 }
8252
8253 return rc;
8254 }
8255
8256 static int32_t
8257 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8258 {
8259 int32_t rc = OCS_HW_RTN_ERROR;
8260 uint16_t fcfi = UINT16_MAX;
8261
8262 if ((hw == NULL) || (domain == NULL)) {
8263 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8264 hw, domain);
8265 return OCS_HW_RTN_ERROR;
8266 }
8267
8268 fcfi = domain->fcf_indicator;
8269
8270 if (fcfi < SLI4_MAX_FCFI) {
8271 uint16_t fcf_index = UINT16_MAX;
8272
8273 ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8274 domain, fcfi);
8275
8276 if (domain != hw->domains[fcfi]) {
8277 ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8278 domain, hw->domains[fcfi]);
8279 return OCS_HW_RTN_ERROR;
8280 }
8281
8282 hw->domains[fcfi] = NULL;
8283
8284 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8285 if (hw->workaround.override_fcfi) {
8286 if (hw->first_domain_idx == fcfi) {
8287 hw->first_domain_idx = -1;
8288 }
8289 }
8290
8291 fcf_index = domain->fcf;
8292
8293 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8294 if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8295 hw->fcf_index_fcfi[fcf_index] = 0;
8296 rc = OCS_HW_RTN_SUCCESS;
8297 } else {
8298 ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8299 hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8300 }
8301 } else {
8302 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8303 fcf_index, SLI4_MAX_FCF_INDEX);
8304 }
8305 } else {
8306 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8307 fcfi, SLI4_MAX_FCFI);
8308 }
8309
8310 return rc;
8311 }
8312
8313 ocs_domain_t *
8314 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8315 {
8316
8317 if (hw == NULL) {
8318 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8319 return NULL;
8320 }
8321
8322 if (fcfi < SLI4_MAX_FCFI) {
8323 return hw->domains[fcfi];
8324 } else {
8325 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8326 fcfi, SLI4_MAX_FCFI);
8327 return NULL;
8328 }
8329 }
8330
8331 static ocs_domain_t *
8332 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8333 {
8334
8335 if (hw == NULL) {
8336 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8337 return NULL;
8338 }
8339
8340 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8341 return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8342 } else {
8343 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8344 fcf_index, SLI4_MAX_FCF_INDEX);
8345 return NULL;
8346 }
8347 }
8348
8349 /**
 * @brief Quarantine an IO by taking a reference count and adding it to the
 * quarantine list. When the IO is popped from the list, the
 * count is released and the IO MAY be freed, depending on whether
 * there are still outstanding references to it.
8354 *
8355 * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8356 * DIF, then we must add the XRI to a quarantine list until we receive
8357 * 4 more completions of this same type.
8358 *
8359 * @param hw Hardware context.
8360 * @param wq Pointer to the WQ associated with the IO object to quarantine.
8361 * @param io Pointer to the io object to quarantine.
8362 */
8363 static void
8364 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8365 {
8366 ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8367 uint32_t index;
8368 ocs_hw_io_t *free_io = NULL;
8369
8370 /* return if the QX bit was clear */
8371 if (!io->quarantine) {
8372 return;
8373 }
8374
8375 /* increment the IO refcount to prevent it from being freed before the quarantine is over */
8376 if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8377 /* command no longer active */
8378 ocs_log_debug(hw ? hw->os : NULL,
8379 "io not active xri=0x%x tag=0x%x\n",
8380 io->indicator, io->reqtag);
8381 return;
8382 }
8383
8384 sli_queue_lock(wq->queue);
8385 index = q_info->quarantine_index;
8386 free_io = q_info->quarantine_ios[index];
8387 q_info->quarantine_ios[index] = io;
8388 q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8389 sli_queue_unlock(wq->queue);
8390
8391 if (free_io != NULL) {
		ocs_ref_put(&free_io->ref); /* release the reference taken when this IO was quarantined */
8393 }
8394 }
8395
8396 /**
8397 * @brief Process entries on the given completion queue.
8398 *
8399 * @param hw Hardware context.
8400 * @param cq Pointer to the HW completion queue object.
8401 *
8402 * @return None.
8403 */
8404 void
8405 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8406 {
8407 uint8_t cqe[sizeof(sli4_mcqe_t)];
8408 uint16_t rid = UINT16_MAX;
8409 sli4_qentry_e ctype; /* completion type */
8410 int32_t status;
8411 uint32_t n_processed = 0;
8412 time_t tstart;
8413 time_t telapsed;
8414
8415 tstart = ocs_msectime();
8416
8417 while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8418 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8419 /*
8420 * The sign of status is significant. If status is:
8421 * == 0 : call completed correctly and the CQE indicated success
8422 * > 0 : call completed correctly and the CQE indicated an error
8423 * < 0 : call failed and no information is available about the CQE
8424 */
8425 if (status < 0) {
8426 if (status == -2) {
8427 /* Notification that an entry was consumed, but not completed */
8428 continue;
8429 }
8430
8431 break;
8432 }
8433
8434 switch (ctype) {
8435 case SLI_QENTRY_ASYNC:
8436 CPUTRACE("async");
8437 sli_cqe_async(&hw->sli, cqe);
8438 break;
8439 case SLI_QENTRY_MQ:
8440 /*
8441 * Process MQ entry. Note there is no way to determine
8442 * the MQ_ID from the completion entry.
8443 */
8444 CPUTRACE("mq");
8445 ocs_hw_mq_process(hw, status, hw->mq);
8446 break;
8447 case SLI_QENTRY_OPT_WRITE_CMD:
8448 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8449 break;
8450 case SLI_QENTRY_OPT_WRITE_DATA:
8451 ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8452 break;
8453 case SLI_QENTRY_WQ:
8454 CPUTRACE("wq");
8455 ocs_hw_wq_process(hw, cq, cqe, status, rid);
8456 break;
8457 case SLI_QENTRY_WQ_RELEASE: {
8458 uint32_t wq_id = rid;
8459 int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8460
8461 if (unlikely(index < 0)) {
8462 ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
8463 index, rid);
8464 break;
8465 }
8466
8467 hw_wq_t *wq = hw->hw_wq[index];
8468
8469 /* Submit any HW IOs that are on the WQ pending list */
8470 hw_wq_submit_pending(wq, wq->wqec_set_count);
8471
8472 break;
8473 }
8474
8475 case SLI_QENTRY_RQ:
8476 CPUTRACE("rq");
8477 ocs_hw_rqpair_process_rq(hw, cq, cqe);
8478 break;
8479 case SLI_QENTRY_XABT: {
8480 CPUTRACE("xabt");
8481 ocs_hw_xabt_process(hw, cq, cqe, rid);
8482 break;
8483 }
8484 default:
8485 ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8486 break;
8487 }
8488
8489 n_processed++;
8490 if (n_processed == cq->queue->proc_limit) {
8491 break;
8492 }
8493
8494 if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8495 sli_queue_arm(&hw->sli, cq->queue, FALSE);
8496 }
8497 }
8498
8499 sli_queue_arm(&hw->sli, cq->queue, TRUE);
8500
8501 if (n_processed > cq->queue->max_num_processed) {
8502 cq->queue->max_num_processed = n_processed;
8503 }
8504 telapsed = ocs_msectime() - tstart;
8505 if (telapsed > cq->queue->max_process_time) {
8506 cq->queue->max_process_time = telapsed;
8507 }
8508 }
8509
8510 /**
8511 * @brief Process WQ completion queue entries.
8512 *
8513 * @param hw Hardware context.
8514 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to the WQ completion queue entry.
8516 * @param status Completion status.
8517 * @param rid Resource ID (IO tag).
8518 *
8519 * @return none
8520 */
8521 void
8522 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8523 {
8524 hw_wq_callback_t *wqcb;
8525
8526 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8527 ((cq->queue->index - 1) & (cq->queue->length - 1)));
8528
	if (rid == OCS_HW_REQUE_XRI_REGTAG) {
		if (status) {
			ocs_log_err(hw->os, "requeue xri failed, status = %d\n", status);
		}
		return;
	}
8535
8536 wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8537 if (wqcb == NULL) {
8538 ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8539 return;
8540 }
8541
8542 if (wqcb->callback == NULL) {
8543 ocs_log_err(hw->os, "wqcb callback is NULL\n");
8544 return;
8545 }
8546
8547 (*wqcb->callback)(wqcb->arg, cqe, status);
8548 }
8549
8550 /**
8551 * @brief Process WQ completions for IO requests
8552 *
8553 * @param arg Generic callback argument
8554 * @param cqe Pointer to completion queue entry
8555 * @param status Completion status
8556 *
8557 * @par Description
8558 * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
 * in ocs_hw_setup_io(), and doesn't need to be returned to the hw->wq_reqtag_pool.
8560 *
8561 * @return None.
8562 */
8563 static void
8564 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8565 {
8566 ocs_hw_io_t *io = arg;
8567 ocs_hw_t *hw = io->hw;
8568 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8569 uint32_t len = 0;
8570 uint32_t ext = 0;
8571 uint8_t out_of_order_axr_cmd = 0;
8572 uint8_t out_of_order_axr_data = 0;
8573 uint8_t lock_taken = 0;
8574 #if defined(OCS_DISC_SPIN_DELAY)
8575 uint32_t delay = 0;
8576 char prop_buf[32];
8577 #endif
8578
	/*
	 * For the primary IO, the same XRI is also used for the
	 * response, so it is important to only set/clear the quarantine
	 * flag on the first data phase of the IO; subsequent phases
	 * are done on the secondary XRI.
	 */
8585 if (io->quarantine && io->quarantine_first_phase) {
8586 io->quarantine = (wcqe->qx == 1);
8587 ocs_hw_io_quarantine(hw, io->wq, io);
8588 }
8589 io->quarantine_first_phase = FALSE;
8590
8591 /* BZ 161832 - free secondary HW IO */
8592 if (io->sec_hio != NULL &&
8593 io->sec_hio->quarantine) {
8594 /*
8595 * If the quarantine flag is set on the
8596 * IO, then set it on the secondary IO
8597 * based on the quarantine XRI (QX) bit
8598 * sent by the FW.
8599 */
8600 io->sec_hio->quarantine = (wcqe->qx == 1);
8601 /* use the primary io->wq because it is not set on the secondary IO. */
8602 ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8603 }
8604
8605 ocs_hw_remove_io_timed_wqe(hw, io);
8606
8607 /* clear xbusy flag if WCQE[XB] is clear */
8608 if (io->xbusy && wcqe->xb == 0) {
8609 io->xbusy = FALSE;
8610 }
8611
8612 /* get extended CQE status */
8613 switch (io->type) {
8614 case OCS_HW_BLS_ACC:
8615 case OCS_HW_BLS_ACC_SID:
8616 break;
8617 case OCS_HW_ELS_REQ:
8618 sli_fc_els_did(&hw->sli, cqe, &ext);
8619 len = sli_fc_response_length(&hw->sli, cqe);
8620 break;
8621 case OCS_HW_ELS_RSP:
8622 case OCS_HW_ELS_RSP_SID:
8623 case OCS_HW_FC_CT_RSP:
8624 break;
8625 case OCS_HW_FC_CT:
8626 len = sli_fc_response_length(&hw->sli, cqe);
8627 break;
8628 case OCS_HW_IO_TARGET_WRITE:
8629 len = sli_fc_io_length(&hw->sli, cqe);
8630 #if defined(OCS_DISC_SPIN_DELAY)
8631 if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8632 delay = ocs_strtoul(prop_buf, 0, 0);
8633 ocs_udelay(delay);
8634 }
8635 #endif
8636 break;
8637 case OCS_HW_IO_TARGET_READ:
8638 len = sli_fc_io_length(&hw->sli, cqe);
8639 /*
8640 * if_type == 2 seems to return 0 "total length placed" on
8641 * FCP_TSEND64_WQE completions. If this appears to happen,
8642 * use the CTIO data transfer length instead.
8643 */
8644 if (hw->workaround.retain_tsend_io_length && !len && !status) {
8645 len = io->length;
8646 }
8647
8648 break;
8649 case OCS_HW_IO_TARGET_RSP:
		if (io->is_port_owned) {
			ocs_lock(&io->axr_lock);
			lock_taken = 1;
			if (io->axr_buf->call_axr_cmd) {
				out_of_order_axr_cmd = 1;
			}
			if (io->axr_buf->call_axr_data) {
				out_of_order_axr_data = 1;
			}
8659 }
8660 break;
8661 case OCS_HW_IO_INITIATOR_READ:
8662 len = sli_fc_io_length(&hw->sli, cqe);
8663 break;
8664 case OCS_HW_IO_INITIATOR_WRITE:
8665 len = sli_fc_io_length(&hw->sli, cqe);
8666 break;
8667 case OCS_HW_IO_INITIATOR_NODATA:
8668 break;
8669 case OCS_HW_IO_DNRX_REQUEUE:
8670 /* release the count for re-posting the buffer */
8671 //ocs_hw_io_free(hw, io);
8672 break;
8673 default:
8674 ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8675 io->type, io->indicator);
8676 break;
8677 }
8678 if (status) {
8679 ext = sli_fc_ext_status(&hw->sli, cqe);
8680 /* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
8681 * abort exchange if an error occurred and exchange is still busy.
8682 */
8683 if (hw->config.i_only_aab &&
8684 (ocs_hw_iotype_is_originator(io->type)) &&
8685 (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8686 ocs_hw_rtn_e rc;
8687
8688 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8689 io->indicator, io->reqtag);
8690 /*
			 * Because the initiator will not issue another IO phase, it is OK to issue the
8692 * callback on the abort completion, but for consistency with the target, wait for the
8693 * XRI_ABORTED CQE to issue the IO callback.
8694 */
8695 rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8696
8697 if (rc == OCS_HW_RTN_SUCCESS) {
8698 /* latch status to return after abort is complete */
8699 io->status_saved = 1;
8700 io->saved_status = status;
8701 io->saved_ext = ext;
8702 io->saved_len = len;
8703 goto exit_ocs_hw_wq_process_io;
8704 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8705 /*
8706 * Already being aborted by someone else (ABTS
8707 * perhaps). Just fall through and return original
8708 * error.
8709 */
8710 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8711 io->indicator, io->reqtag);
8712
8713 } else {
8714 /* Failed to abort for some other reason, log error */
8715 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8716 io->indicator, io->reqtag, rc);
8717 }
8718 }
8719
8720 /*
8721 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
8722 */
8723 if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
8724 ocs_hw_rtn_e rc;
8725
8726 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8727
/*
 * Because targets may send a response when the IO completes using the same XRI, we must
 * wait for the XRI_ABORTED CQE before issuing the IO callback.
 */
8732 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8733 if (rc == OCS_HW_RTN_SUCCESS) {
8734 /* latch status to return after abort is complete */
8735 io->status_saved = 1;
8736 io->saved_status = status;
8737 io->saved_ext = ext;
8738 io->saved_len = len;
8739 goto exit_ocs_hw_wq_process_io;
8740 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8741 /*
8742 * Already being aborted by someone else (ABTS
8743 * perhaps). Just fall through and return original
8744 * error.
8745 */
8746 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8747 io->indicator, io->reqtag);
8748
8749 } else {
8750 /* Failed to abort for some other reason, log error */
8751 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8752 io->indicator, io->reqtag, rc);
8753 }
8754 }
8755 }
8756 /* BZ 161832 - free secondary HW IO */
8757 if (io->sec_hio != NULL) {
8758 ocs_hw_io_free(hw, io->sec_hio);
8759 io->sec_hio = NULL;
8760 }
8761
8762 if (io->done != NULL) {
8763 ocs_hw_done_t done = io->done;
8764 void *arg = io->arg;
8765
8766 io->done = NULL;
8767
8768 if (io->status_saved) {
8769 /* use latched status if exists */
8770 status = io->saved_status;
8771 len = io->saved_len;
8772 ext = io->saved_ext;
8773 io->status_saved = 0;
8774 }
8775
8776 /* Restore default SGL */
8777 ocs_hw_io_restore_sgl(hw, io);
8778 done(io, io->rnode, len, status, ext, arg);
8779 }
8780
if (out_of_order_axr_cmd) {
/* With bounce enabled and a single RQ, snoop the ox_id to choose the CPU index */
8783 if (hw->config.bounce) {
8784 fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8785 uint32_t s_id = fc_be24toh(hdr->s_id);
8786 uint32_t d_id = fc_be24toh(hdr->d_id);
8787 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
8788 if (hw->callback.bounce != NULL) {
8789 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8790 }
} else {
8792 hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8793 }
8794
if (out_of_order_axr_data) {
/* With bounce enabled and a single RQ, snoop the ox_id to choose the CPU index */
8797 if (hw->config.bounce) {
8798 fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8799 uint32_t s_id = fc_be24toh(hdr->s_id);
8800 uint32_t d_id = fc_be24toh(hdr->d_id);
8801 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
8802 if (hw->callback.bounce != NULL) {
8803 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8804 }
} else {
8806 hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8807 }
8808 }
8809 }
8810
8811 exit_ocs_hw_wq_process_io:
if (lock_taken) {
8813 ocs_unlock(&io->axr_lock);
8814 }
8815 }
8816
8817 /**
8818 * @brief Process WQ completions for abort requests.
8819 *
8820 * @param arg Generic callback argument.
8821 * @param cqe Pointer to completion queue entry.
8822 * @param status Completion status.
8823 *
8824 * @return None.
8825 */
8826 static void
8827 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8828 {
8829 ocs_hw_io_t *io = arg;
8830 ocs_hw_t *hw = io->hw;
8831 uint32_t ext = 0;
8832 uint32_t len = 0;
8833 hw_wq_callback_t *wqcb;
8834
/*
 * For IOs that were aborted internally, we may need to issue the callback here depending
 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI,
 * then issue the callback now.
 */
8840 ext = sli_fc_ext_status(&hw->sli, cqe);
8841 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8842 ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8843 io->done != NULL) {
8844 ocs_hw_done_t done = io->done;
8845 void *arg = io->arg;
8846
8847 io->done = NULL;
8848
/*
 * Use the latched status, as this is always saved for an internal abort.
 *
 * Note: We won't have both a done and an abort_done function, so don't worry about
 * clobbering the len, status, and ext fields.
 */
8855 status = io->saved_status;
8856 len = io->saved_len;
8857 ext = io->saved_ext;
8858 io->status_saved = 0;
8859 done(io, io->rnode, len, status, ext, arg);
8860 }
8861
8862 if (io->abort_done != NULL) {
8863 ocs_hw_done_t done = io->abort_done;
8864 void *arg = io->abort_arg;
8865
8866 io->abort_done = NULL;
8867
8868 done(io, io->rnode, len, status, ext, arg);
8869 }
8870 ocs_lock(&hw->io_abort_lock);
8871 /* clear abort bit to indicate abort is complete */
8872 io->abort_in_progress = 0;
8873 ocs_unlock(&hw->io_abort_lock);
8874
8875 /* Free the WQ callback */
8876 ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8877 wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8878 ocs_hw_reqtag_free(hw, wqcb);
8879
8880 /*
8881 * Call ocs_hw_io_free() because this releases the WQ reservation as
8882 * well as doing the refcount put. Don't duplicate the code here.
8883 */
8884 (void)ocs_hw_io_free(hw, io);
8885 }
8886
/**
 * @brief Process XABT completions.
 *
 * @param hw Hardware context.
 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to the completion queue entry.
 * @param rid Resource ID (IO tag).
 *
 * @return None.
 */
8898 void
8899 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8900 {
8901 /* search IOs wait free list */
8902 ocs_hw_io_t *io = NULL;
8903
8904 io = ocs_hw_io_lookup(hw, rid);
8905
8906 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8907 ((cq->queue->index - 1) & (cq->queue->length - 1)));
8908 if (io == NULL) {
8909 /* IO lookup failure should never happen */
8910 ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8911 return;
8912 }
8913
8914 if (!io->xbusy) {
8915 ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8916 } else {
8917 /* mark IO as no longer busy */
8918 io->xbusy = FALSE;
8919 }
8920
8921 if (io->is_port_owned) {
8922 ocs_lock(&hw->io_lock);
/* Take a reference so that the callback below does not free the IO before it is requeued */
8924 ocs_ref_get(&io->ref);
8925 ocs_unlock(&hw->io_lock);
8926 }
8927
8928 /* For IOs that were aborted internally, we need to issue any pending callback here. */
8929 if (io->done != NULL) {
8930 ocs_hw_done_t done = io->done;
8931 void *arg = io->arg;
8932
8933 /* Use latched status as this is always saved for an internal abort */
8934 int32_t status = io->saved_status;
8935 uint32_t len = io->saved_len;
8936 uint32_t ext = io->saved_ext;
8937
8938 io->done = NULL;
8939 io->status_saved = 0;
8940
8941 done(io, io->rnode, len, status, ext, arg);
8942 }
8943
8944 /* Check to see if this is a port owned XRI */
8945 if (io->is_port_owned) {
8946 ocs_lock(&hw->io_lock);
8947 ocs_hw_reque_xri(hw, io);
8948 ocs_unlock(&hw->io_lock);
/* Not handling the requeue XRI completion; free the IO */
8950 ocs_hw_io_free(hw, io);
8951 return;
8952 }
8953
8954 ocs_lock(&hw->io_lock);
8955 if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8956 /* if on wait_free list, caller has already freed IO;
8957 * remove from wait_free list and add to free list.
8958 * if on in-use list, already marked as no longer busy;
8959 * just leave there and wait for caller to free.
8960 */
8961 if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8962 io->state = OCS_HW_IO_STATE_FREE;
8963 ocs_list_remove(&hw->io_wait_free, io);
8964 ocs_hw_io_free_move_correct_list(hw, io);
8965 }
8966 }
8967 ocs_unlock(&hw->io_lock);
8968 }
8969
8970 /**
8971 * @brief Adjust the number of WQs and CQs within the HW.
8972 *
8973 * @par Description
8974 * Calculates the number of WQs and associated CQs needed in the HW based on
8975 * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
8976 * MQ.
8977 *
8978 * @param hw Hardware context allocated by the caller.
8979 */
8980 static void
8981 ocs_hw_adjust_wqs(ocs_hw_t *hw)
8982 {
8983 uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
8984 uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
8985 uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
8986
/*
 * Possibly adjust the size of the WQs so that the CQ is twice as
 * big as the WQ, allowing two completions per IO. This lets us
 * handle multi-phase IOs as well as aborts.
 */
8992 if (max_cq_entries < max_wq_entries * 2) {
8993 max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
8994 }
8995
/*
 * Calculate the number of WQs to use based on the number of IOs.
 *
 * Note: We need to reserve room for aborts, which must be sent down
 *       the same WQ as the IO. So we allocate enough WQ space to
 *       handle 2 times the number of IOs. Half of the space will be
 *       used for normal IOs and the other half is reserved for aborts.
 */
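/*
 * For example (illustrative values): with n_io = 1024 and
 * max_wq_entries = 512, n_wq = (2 * 1024 + 511) / 512 = 4.
 */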
9004 hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
9005
/*
 * For performance reasons, it is best to use a minimum of 4 WQs
 * for BE3 and Skyhawk.
 */
9010 if (hw->config.n_wq < 4 &&
9011 SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9012 hw->config.n_wq = 4;
9013 }
9014
9015 /*
9016 * For dual-chute support, we need to have at least one WQ per chute.
9017 */
9018 if (hw->config.n_wq < 2 &&
9019 ocs_hw_get_num_chutes(hw) > 1) {
9020 hw->config.n_wq = 2;
9021 }
9022
9023 /* make sure we haven't exceeded the max supported in the HW */
9024 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9025 hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9026 }
9027
9028 /* make sure we haven't exceeded the chip maximum */
9029 if (hw->config.n_wq > max_wq_num) {
9030 hw->config.n_wq = max_wq_num;
9031 }
9032
/*
 * Based on the queue topology string, divide the WQ count by the number of chutes.
 */
9036 hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
9037 }
9038
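/**
 * @brief Process a mailbox command completion.
 *
 * @par Description
 * Removes the completed command context from the head of the command list,
 * submits any pending commands, and then invokes the context's callback,
 * copying the MQE into the caller's buffer if one was provided.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to the mailbox completion entry.
 * @param size Size of the completion entry.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */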
9039 static int32_t
9040 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9041 {
9042 ocs_command_ctx_t *ctx = NULL;
9043
9044 ocs_lock(&hw->cmd_lock);
9045 if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9046 ocs_log_err(hw->os, "XXX no command context?!?\n");
9047 ocs_unlock(&hw->cmd_lock);
9048 return -1;
9049 }
9050
9051 hw->cmd_head_count--;
9052
9053 /* Post any pending requests */
9054 ocs_hw_cmd_submit_pending(hw);
9055
9056 ocs_unlock(&hw->cmd_lock);
9057
9058 if (ctx->cb) {
9059 if (ctx->buf) {
9060 ocs_memcpy(ctx->buf, mqe, size);
9061 }
9062 ctx->cb(hw, status, ctx->buf, ctx->arg);
9063 }
9064
9065 ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9066 ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9067
9068 return 0;
9069 }
9070
9071 /**
9072 * @brief Process entries on the given mailbox queue.
9073 *
9074 * @param hw Hardware context.
9075 * @param status CQE status.
9076 * @param mq Pointer to the mailbox queue object.
9077 *
 * @return Always returns 0.
9079 */
9080 static int32_t
9081 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9082 {
9083 uint8_t mqe[SLI4_BMBX_SIZE];
9084
9085 if (!sli_queue_read(&hw->sli, mq, mqe)) {
9086 ocs_hw_command_process(hw, status, mqe, mq->size);
9087 }
9088
9089 return 0;
9090 }
9091
9092 /**
9093 * @brief Read a FCF table entry.
9094 *
9095 * @param hw Hardware context.
9096 * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9097 * read and the next_index field from the FCOE_READ_FCF_TABLE command
9098 * for subsequent reads.
9099 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
9101 */
9102 static ocs_hw_rtn_e
9103 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9104 {
9105 uint8_t *buf = NULL;
ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
9107
9108 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9109 if (!buf) {
9110 ocs_log_err(hw->os, "no buffer for command\n");
9111 return OCS_HW_RTN_NO_MEMORY;
9112 }
9113
9114 if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9115 index)) {
9116 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9117 }
9118
9119 if (rc != OCS_HW_RTN_SUCCESS) {
9120 ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9121 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9122 }
9123
9124 return rc;
9125 }
9126
/**
 * @brief Callback function for the FCOE_READ_FCF_TABLE command.
 *
 * @par Description
 * The command/results buffer is freed here. The DMA memory holding the
 * table contents (hw->fcf_dmem) persists across reads and is therefore
 * not freed here.
 *
 * @param hw Hardware context.
 * @param status Hardware status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Pointer to the DMA memory structure.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
9145 static int32_t
9146 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9147 {
9148 ocs_dma_t *dma = arg;
9149 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9150
9151 if (status || hdr->status) {
9152 ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9153 status, hdr->status);
9154 } else if (dma->virt) {
9155 sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9156
9157 /* if FC or FCOE and FCF entry valid, process it */
9158 if (read_fcf->fcf_entry.fc ||
9159 (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9160 if (hw->callback.domain != NULL) {
9161 ocs_domain_record_t drec = {0};
9162
9163 if (read_fcf->fcf_entry.fc) {
9164 /*
9165 * This is a pseudo FCF entry. Create a domain
9166 * record based on the read topology information
9167 */
9168 drec.speed = hw->link.speed;
9169 drec.fc_id = hw->link.fc_id;
9170 drec.is_fc = TRUE;
9171 if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9172 drec.is_loop = TRUE;
9173 ocs_memcpy(drec.map.loop, hw->link.loop_map,
9174 sizeof(drec.map.loop));
9175 } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9176 drec.is_nport = TRUE;
9177 }
9178 } else {
9179 drec.index = read_fcf->fcf_entry.fcf_index;
9180 drec.priority = read_fcf->fcf_entry.fip_priority;
9181
9182 /* copy address, wwn and vlan_bitmap */
9183 ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9184 sizeof(drec.address));
9185 ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9186 sizeof(drec.wwn));
9187 ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9188 sizeof(drec.map.vlan));
9189
9190 drec.is_ethernet = TRUE;
9191 drec.is_nport = TRUE;
9192 }
9193
9194 hw->callback.domain(hw->args.domain,
9195 OCS_HW_DOMAIN_FOUND,
9196 &drec);
9197 }
9198 } else {
9199 /* if FCOE and FCF is not valid, ignore it */
9200 ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9201 }
9202
9203 if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9204 ocs_hw_read_fcf(hw, read_fcf->next_index);
9205 }
9206 }
9207
9208 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9211
9212 return 0;
9213 }
9214
9215 /**
9216 * @brief Callback function for the SLI link events.
9217 *
9218 * @par Description
9219 * This function allocates memory which must be freed in its callback.
9220 *
9221 * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9222 * @param e Event structure pointer (that is, sli4_link_event_t *).
9223 *
9224 * @return Returns 0 on success, or a non-zero value on failure.
9225 */
9226 static int32_t
9227 ocs_hw_cb_link(void *ctx, void *e)
9228 {
9229 ocs_hw_t *hw = ctx;
9230 sli4_link_event_t *event = e;
9231 ocs_domain_t *d = NULL;
9232 uint32_t i = 0;
9233 int32_t rc = OCS_HW_RTN_ERROR;
9234 ocs_t *ocs = hw->os;
9235
9236 ocs_hw_link_event_init(hw);
9237
9238 switch (event->status) {
9239 case SLI_LINK_STATUS_UP:
9240
9241 hw->link = *event;
9242
9243 if (SLI_LINK_TOPO_NPORT == event->topology) {
9244 device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9245 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9246 } else if (SLI_LINK_TOPO_LOOP == event->topology) {
9247 uint8_t *buf = NULL;
9248 device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9249
9250 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9251 if (!buf) {
9252 ocs_log_err(hw->os, "no buffer for command\n");
9253 break;
9254 }
9255
9256 if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9257 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9258 }
9259
9260 if (rc != OCS_HW_RTN_SUCCESS) {
9261 ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9262 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9263 }
9264 } else {
9265 device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9266 event->topology, event->speed);
9267 }
9268 break;
9269 case SLI_LINK_STATUS_DOWN:
9270 device_printf(ocs->dev, "Link Down\n");
9271
9272 hw->link.status = event->status;
9273
9274 for (i = 0; i < SLI4_MAX_FCFI; i++) {
9275 d = hw->domains[i];
9276 if (d != NULL &&
9277 hw->callback.domain != NULL) {
9278 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9279 }
9280 }
9281 break;
9282 default:
9283 ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9284 break;
9285 }
9286
9287 return 0;
9288 }
9289
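/**
 * @brief Callback function for SLI FIP events.
 *
 * @par Description
 * Resolves the domain associated with the event (for clear virtual link
 * events by matching the VPI against attached SLI ports, otherwise by FCF
 * index), and then processes the event.
 *
 * @param ctx Hardware context pointer (that is, ocs_hw_t *).
 * @param e Event structure pointer (that is, sli4_fip_event_t *).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */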
9290 static int32_t
9291 ocs_hw_cb_fip(void *ctx, void *e)
9292 {
9293 ocs_hw_t *hw = ctx;
9294 ocs_domain_t *domain = NULL;
9295 sli4_fip_event_t *event = e;
9296
9297 ocs_hw_assert(event);
9298 ocs_hw_assert(hw);
9299
9300 /* Find the associated domain object */
9301 if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9302 ocs_domain_t *d = NULL;
9303 uint32_t i = 0;
9304
/* Clear VLINK is different from the other FIP events, as it passes back
 * a VPI instead of an FCF index. Check all attached SLI ports for a
 * matching VPI. */
9308 for (i = 0; i < SLI4_MAX_FCFI; i++) {
9309 d = hw->domains[i];
9310 if (d != NULL) {
9311 ocs_sport_t *sport = NULL;
9312
9313 ocs_list_foreach(&d->sport_list, sport) {
9314 if (sport->indicator == event->index) {
9315 domain = d;
9316 break;
9317 }
9318 }
9319
9320 if (domain != NULL) {
9321 break;
9322 }
9323 }
9324 }
9325 } else {
9326 domain = ocs_hw_domain_get_indexed(hw, event->index);
9327 }
9328
9329 switch (event->type) {
9330 case SLI4_FCOE_FIP_FCF_DISCOVERED:
9331 ocs_hw_read_fcf(hw, event->index);
9332 break;
9333 case SLI4_FCOE_FIP_FCF_DEAD:
9334 if (domain != NULL &&
9335 hw->callback.domain != NULL) {
9336 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9337 }
9338 break;
9339 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9340 if (domain != NULL &&
9341 hw->callback.domain != NULL) {
/*
 * We will want to issue a rediscover-FCF request when this domain is freed,
 * in order to invalidate the FCF table.
 */
9346 domain->req_rediscover_fcf = TRUE;
9347 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9348 }
9349 break;
9350 case SLI4_FCOE_FIP_FCF_MODIFIED:
9351 if (domain != NULL &&
9352 hw->callback.domain != NULL) {
9353 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9354 }
9355
9356 ocs_hw_read_fcf(hw, event->index);
9357 break;
9358 default:
9359 ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9360 }
9361
9362 return 0;
9363 }
9364
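/**
 * @brief Callback function invoked when a node attach command completes.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Pointer to the remote node.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */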
9365 static int32_t
9366 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9367 {
9368 ocs_remote_node_t *rnode = arg;
9369 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9370 ocs_hw_remote_node_event_e evt = 0;
9371
9372 if (status || hdr->status) {
9373 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9374 hdr->status);
9375 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9376 rnode->attached = FALSE;
9377 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9378 evt = OCS_HW_NODE_ATTACH_FAIL;
9379 } else {
9380 rnode->attached = TRUE;
9381 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9382 evt = OCS_HW_NODE_ATTACH_OK;
9383 }
9384
9385 if (hw->callback.rnode != NULL) {
9386 hw->callback.rnode(hw->args.rnode, evt, rnode);
9387 }
9388 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9389
9390 return 0;
9391 }
9392
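/**
 * @brief Callback function invoked when a node free command completes.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Pointer to the remote node.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */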
9393 static int32_t
9394 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9395 {
9396 ocs_remote_node_t *rnode = arg;
9397 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9398 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9399 int32_t rc = 0;
9400
9401 if (status || hdr->status) {
9402 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9403 hdr->status);
9404
9405 /*
9406 * In certain cases, a non-zero MQE status is OK (all must be true):
9407 * - node is attached
9408 * - if High Login Mode is enabled, node is part of a node group
9409 * - status is 0x1400
9410 */
9411 if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9412 (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9413 rc = -1;
9414 }
9415 }
9416
9417 if (rc == 0) {
9418 rnode->node_group = FALSE;
9419 rnode->attached = FALSE;
9420
9421 if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9422 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9423 }
9424
9425 evt = OCS_HW_NODE_FREE_OK;
9426 }
9427
9428 if (hw->callback.rnode != NULL) {
9429 hw->callback.rnode(hw->args.rnode, evt, rnode);
9430 }
9431
9432 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9433
9434 return rc;
9435 }
9436
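/**
 * @brief Callback function invoked when a free-all-nodes command completes.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Unused callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */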
9437 static int32_t
9438 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9439 {
9440 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9441 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9442 int32_t rc = 0;
9443 uint32_t i;
9444
9445 if (status || hdr->status) {
9446 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9447 hdr->status);
9448 } else {
9449 evt = OCS_HW_NODE_FREE_ALL_OK;
9450 }
9451
9452 if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9453 for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9454 ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9455 }
9456
9457 if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9458 ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9459 rc = -1;
9460 }
9461 }
9462
9463 if (hw->callback.rnode != NULL) {
9464 hw->callback.rnode(hw->args.rnode, evt, NULL);
9465 }
9466
9467 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9468
9469 return rc;
9470 }
9471
9472 /**
9473 * @brief Initialize the pool of HW IO objects.
9474 *
9475 * @param hw Hardware context.
9476 *
9477 * @return Returns 0 on success, or a non-zero value on failure.
9478 */
9479 static ocs_hw_rtn_e
9480 ocs_hw_setup_io(ocs_hw_t *hw)
9481 {
9482 uint32_t i = 0;
9483 ocs_hw_io_t *io = NULL;
9484 uintptr_t xfer_virt = 0;
9485 uintptr_t xfer_phys = 0;
9486 uint32_t index;
9487 uint8_t new_alloc = TRUE;
9488
9489 if (NULL == hw->io) {
9490 hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9491
9492 if (NULL == hw->io) {
9493 ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9494 hw->config.n_io,
9495 sizeof(ocs_hw_io_t *));
9496 return OCS_HW_RTN_NO_MEMORY;
9497 }
9498 for (i = 0; i < hw->config.n_io; i++) {
9499 hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9500 OCS_M_ZERO | OCS_M_NOWAIT);
9501 if (hw->io[i] == NULL) {
9502 ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9503 goto error;
9504 }
9505 }
9506
/* Create WQE buffers for the IOs */
9508 hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9509 OCS_M_ZERO | OCS_M_NOWAIT);
9510 if (NULL == hw->wqe_buffs) {
9511 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t));
9512 ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9513 __func__, hw->config.n_io, hw->sli.config.wqe_size);
9514 return OCS_HW_RTN_NO_MEMORY;
9515 }
9516
9517 } else {
9518 /* re-use existing IOs, including SGLs */
9519 new_alloc = FALSE;
9520 }
9521
9522 if (new_alloc) {
9523 if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9524 sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9525 4/*XXX what does this need to be? */)) {
9526 ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9527 return OCS_HW_RTN_NO_MEMORY;
9528 }
9529 }
9530 xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9531 xfer_phys = hw->xfer_rdy.phys;
9532
9533 for (i = 0; i < hw->config.n_io; i++) {
9534 hw_wq_callback_t *wqcb;
9535
9536 io = hw->io[i];
9537
9538 /* initialize IO fields */
9539 io->hw = hw;
9540
9541 /* Assign a WQE buff */
9542 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9543
9544 /* Allocate the request tag for this IO */
9545 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9546 if (wqcb == NULL) {
9547 ocs_log_err(hw->os, "can't allocate request tag\n");
9548 return OCS_HW_RTN_NO_RESOURCES;
9549 }
9550 io->reqtag = wqcb->instance_index;
9551
9552 /* Now for the fields that are initialized on each free */
9553 ocs_hw_init_free_io(io);
9554
9555 /* The XB flag isn't cleared on IO free, so initialize it to zero here */
9556 io->xbusy = 0;
9557
9558 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9559 ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9560 return OCS_HW_RTN_NO_MEMORY;
9561 }
9562
9563 if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9564 ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9565 ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9566 return OCS_HW_RTN_NO_MEMORY;
9567 }
9568 io->def_sgl_count = hw->config.n_sgl;
9569 io->sgl = &io->def_sgl;
9570 io->sgl_count = io->def_sgl_count;
9571
9572 if (hw->xfer_rdy.size) {
9573 io->xfer_rdy.virt = (void *)xfer_virt;
9574 io->xfer_rdy.phys = xfer_phys;
9575 io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9576
9577 xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9578 xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9579 }
9580 }
9581
9582 return OCS_HW_RTN_SUCCESS;
9583 error:
9584 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9585 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9586 hw->io[i] = NULL;
9587 }
9588
9589 return OCS_HW_RTN_NO_MEMORY;
9590 }
9591
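/**
 * @brief Place the HW IO objects on the free list.
 *
 * @par Description
 * If SGL pre-registration is enabled, the SGLs are first posted to the
 * chip in batches of up to 256 contiguous XRIs using FCOE_POST_SGL_PAGES;
 * otherwise, the IOs are simply added to the free list.
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */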
9592 static ocs_hw_rtn_e
9593 ocs_hw_init_io(ocs_hw_t *hw)
9594 {
9595 uint32_t i = 0, io_index = 0;
9596 uint32_t prereg = 0;
9597 ocs_hw_io_t *io = NULL;
9598 uint8_t cmd[SLI4_BMBX_SIZE];
9599 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9600 uint32_t nremaining;
9601 uint32_t n = 0;
9602 uint32_t sgls_per_request = 256;
9603 ocs_dma_t **sgls = NULL;
9604 ocs_dma_t reqbuf = { 0 };
9605
9606 prereg = sli_get_sgl_preregister(&hw->sli);
9607
9608 if (prereg) {
9609 sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9610 if (sgls == NULL) {
9611 ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9612 return OCS_HW_RTN_NO_MEMORY;
9613 }
9614
9615 rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9616 if (rc) {
9617 ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9618 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9619 return OCS_HW_RTN_NO_MEMORY;
9620 }
9621 }
9622
9623 io = hw->io[io_index];
9624 for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9625 if (prereg) {
/* Copy the addresses of the SGLs into the local sgls[] array; break out
 * if the XRI values are not contiguous.
 */
9629 for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9630 /* Check that we have contiguous xri values */
9631 if (n > 0) {
9632 if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9633 break;
9634 }
9635 }
9636 sgls[n] = hw->io[io_index + n]->sgl;
9637 }
9638
9639 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9640 io->indicator, n, sgls, NULL, &reqbuf)) {
9641 if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9642 rc = OCS_HW_RTN_ERROR;
9643 ocs_log_err(hw->os, "SGL post failed\n");
9644 break;
9645 }
9646 }
9647 } else {
9648 n = nremaining;
9649 }
9650
9651 /* Add to tail if successful */
for (i = 0; i < n; i++) {
io->is_port_owned = 0;
io->state = OCS_HW_IO_STATE_FREE;
ocs_list_add_tail(&hw->io_free, io);
io_index++;
/* guard against indexing one past the last IO pointer */
if (io_index < hw->config.n_io) {
io = hw->io[io_index];
}
}
9659 }
9660
9661 if (prereg) {
9662 ocs_dma_free(hw->os, &reqbuf);
9663 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9664 }
9665
9666 return rc;
9667 }
9668
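/**
 * @brief Process any remaining completions on all event queues.
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */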
9669 static int32_t
9670 ocs_hw_flush(ocs_hw_t *hw)
9671 {
9672 uint32_t i = 0;
9673
9674 /* Process any remaining completions */
9675 for (i = 0; i < hw->eq_count; i++) {
9676 ocs_hw_process(hw, i, ~0);
9677 }
9678
9679 return 0;
9680 }
9681
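/**
 * @brief Fail any outstanding mailbox commands, for example during shutdown.
 *
 * @par Description
 * Walks the command list, completing each entry with an error status via
 * ocs_hw_command_process(), which also drains the pending command list.
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */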
9682 static int32_t
9683 ocs_hw_command_cancel(ocs_hw_t *hw)
9684 {
9685
9686 ocs_lock(&hw->cmd_lock);
9687
9688 /*
9689 * Manually clean up remaining commands. Note: since this calls
9690 * ocs_hw_command_process(), we'll also process the cmd_pending
9691 * list, so no need to manually clean that out.
9692 */
9693 while (!ocs_list_empty(&hw->cmd_head)) {
9694 uint8_t mqe[SLI4_BMBX_SIZE] = { 0 };
9695 ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9696
9697 ocs_log_test(hw->os, "hung command %08x\n",
9698 NULL == ctx ? UINT32_MAX :
9699 (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9700 ocs_unlock(&hw->cmd_lock);
9701 ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9702 ocs_lock(&hw->cmd_lock);
9703 }
9704
9705 ocs_unlock(&hw->cmd_lock);
9706
9707 return 0;
9708 }
9709
9710 /**
9711 * @brief Find IO given indicator (xri).
9712 *
 * @param hw Hardware context.
 * @param xri Indicator (XRI) to look for.
 *
 * @return Returns the matching IO.
9717 */
9718 ocs_hw_io_t *
9719 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9720 {
9721 uint32_t ioindex;
9722 ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9723 return hw->io[ioindex];
9724 }
9725
9726 /**
 * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO to clean up.
9731 */
9732 static void
9733 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9734 {
9735 ocs_hw_done_t done = io->done;
9736 ocs_hw_done_t abort_done = io->abort_done;
9737
/* first check the timed WQE list and remove the IO if present */
9739 if (ocs_list_on_list(&io->wqe_link)) {
9740 ocs_list_remove(&hw->io_timed_wqe, io);
9741 }
9742
9743 /* Remove from WQ pending list */
9744 if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9745 ocs_list_remove(&io->wq->pending_list, io);
9746 }
9747
9748 if (io->done) {
9749 void *arg = io->arg;
9750
9751 io->done = NULL;
9752 ocs_unlock(&hw->io_lock);
9753 done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9754 ocs_lock(&hw->io_lock);
9755 }
9756
9757 if (io->abort_done != NULL) {
9758 void *abort_arg = io->abort_arg;
9759
9760 io->abort_done = NULL;
9761 ocs_unlock(&hw->io_lock);
9762 abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9763 ocs_lock(&hw->io_lock);
9764 }
9765 }
9766
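/**
 * @brief Clean up outstanding IOs, for example during reset or shutdown.
 *
 * @par Description
 * Issues any pending done/abort_done callbacks for in-use and port-owned
 * IOs, frees the IOs that have no callbacks, and then waits (bounded) for
 * the remaining callbacks to complete.
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */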
9767 static int32_t
9768 ocs_hw_io_cancel(ocs_hw_t *hw)
9769 {
9770 ocs_hw_io_t *io = NULL;
9771 ocs_hw_io_t *tmp_io = NULL;
9772 uint32_t iters = 100; /* One second limit */
9773
9774 /*
9775 * Manually clean up outstanding IO.
9776 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
9777 */
9778 ocs_lock(&hw->io_lock);
9779 ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9780 ocs_hw_done_t done = io->done;
9781 ocs_hw_done_t abort_done = io->abort_done;
9782
9783 ocs_hw_io_cancel_cleanup(hw, io);
9784
/*
 * Since this is called in a reset/shutdown
 * case, if there is no callback, then just
 * free the IO.
 *
 * Note: A port-owned XRI cannot be on
 * the in-use list. We cannot call
 * ocs_hw_io_free() because we already
 * hold the io_lock.
 */
if (done == NULL &&
abort_done == NULL) {
ocs_hw_io_free_common(hw, io);
ocs_list_remove(&hw->io_inuse, io);
ocs_hw_io_free_move_correct_list(hw, io);
}
9806 }
9807
/*
 * Port-owned XRIs are not on the in-use list, so
 * walk through them and issue any callbacks.
 */
9812 ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9813 /* check list and remove if there */
9814 if (ocs_list_on_list(&io->dnrx_link)) {
9815 ocs_list_remove(&hw->io_port_dnrx, io);
9816 ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9817 }
9818 ocs_hw_io_cancel_cleanup(hw, io);
9819 ocs_list_remove(&hw->io_port_owned, io);
9820 ocs_hw_io_free_common(hw, io);
9821 }
9822 ocs_unlock(&hw->io_lock);
9823
9824 /* Give time for the callbacks to complete */
9825 do {
9826 ocs_udelay(10000);
9827 iters--;
9828 } while (!ocs_list_empty(&hw->io_inuse) && iters);
9829
9830 /* Leave a breadcrumb that cleanup is not yet complete. */
9831 if (!ocs_list_empty(&hw->io_inuse)) {
9832 ocs_log_test(hw->os, "io_inuse list is not empty\n");
9833 }
9834
9835 return 0;
9836 }
9837
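/**
 * @brief Set up an initiator SGL containing a command entry and a response entry.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO.
 * @param cmnd DMA buffer holding the command.
 * @param cmnd_size Length of the command.
 * @param rsp DMA buffer for the response.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */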
9838 static int32_t
9839 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9840 ocs_dma_t *rsp)
9841 {
9842 sli4_sge_t *data = NULL;
9843
9844 if (!hw || !io) {
9845 ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9846 return OCS_HW_RTN_ERROR;
9847 }
9848
9849 data = io->def_sgl.virt;
9850
9851 /* setup command pointer */
9852 data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9853 data->buffer_address_low = ocs_addr32_lo(cmnd->phys);
9854 data->buffer_length = cmnd_size;
9855 data++;
9856
9857 /* setup response pointer */
9858 data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9859 data->buffer_address_low = ocs_addr32_lo(rsp->phys);
9860 data->buffer_length = rsp->size;
9861
9862 return 0;
9863 }
9864
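/**
 * @brief Callback function for the READ_TOPOLOGY command.
 *
 * @par Description
 * Records the link status, topology, and speed from the mailbox response,
 * and then kicks off a read of the FCF table.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Unused callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */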
9865 static int32_t
9866 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9867 {
9868 sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9869
9870 if (status || read_topo->hdr.status) {
9871 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9872 status, read_topo->hdr.status);
9873 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9874 return -1;
9875 }
9876
9877 switch (read_topo->attention_type) {
9878 case SLI4_READ_TOPOLOGY_LINK_UP:
9879 hw->link.status = SLI_LINK_STATUS_UP;
9880 break;
9881 case SLI4_READ_TOPOLOGY_LINK_DOWN:
9882 hw->link.status = SLI_LINK_STATUS_DOWN;
9883 break;
9884 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9885 hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9886 break;
9887 default:
9888 hw->link.status = SLI_LINK_STATUS_MAX;
9889 break;
9890 }
9891
9892 switch (read_topo->topology) {
9893 case SLI4_READ_TOPOLOGY_NPORT:
9894 hw->link.topology = SLI_LINK_TOPO_NPORT;
9895 break;
9896 case SLI4_READ_TOPOLOGY_FC_AL:
9897 hw->link.topology = SLI_LINK_TOPO_LOOP;
9898 if (SLI_LINK_STATUS_UP == hw->link.status) {
9899 hw->link.loop_map = hw->loop_map.virt;
9900 }
9901 hw->link.fc_id = read_topo->acquired_al_pa;
9902 break;
9903 default:
9904 hw->link.topology = SLI_LINK_TOPO_MAX;
9905 break;
9906 }
9907
9908 hw->link.medium = SLI_LINK_MEDIUM_FC;
9909
9910 switch (read_topo->link_current.link_speed) {
9911 case SLI4_READ_TOPOLOGY_SPEED_1G:
9912 hw->link.speed = 1 * 1000;
9913 break;
9914 case SLI4_READ_TOPOLOGY_SPEED_2G:
9915 hw->link.speed = 2 * 1000;
9916 break;
9917 case SLI4_READ_TOPOLOGY_SPEED_4G:
9918 hw->link.speed = 4 * 1000;
9919 break;
9920 case SLI4_READ_TOPOLOGY_SPEED_8G:
9921 hw->link.speed = 8 * 1000;
9922 break;
9923 case SLI4_READ_TOPOLOGY_SPEED_16G:
9924 hw->link.speed = 16 * 1000;
9925 hw->link.loop_map = NULL;
9926 break;
9927 case SLI4_READ_TOPOLOGY_SPEED_32G:
9928 hw->link.speed = 32 * 1000;
9929 hw->link.loop_map = NULL;
9930 break;
9931 }
9932
9933 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9934
9935 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9936
9937 return 0;
9938 }
9939
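/***************************************************************************
 * Port state machine
 */

/* Default event handler for the port state machine: frees the mailbox
 * buffer on free/attach requests and logs unhandled events.
 */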
9940 static int32_t
9941 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9942 {
9943 ocs_sli_port_t *sport = ctx->app;
9944 ocs_hw_t *hw = sport->hw;
9945
9946 smtrace("port");
9947
9948 switch (evt) {
9949 case OCS_EVT_EXIT:
9950 /* ignore */
9951 break;
9952
9953 case OCS_EVT_HW_PORT_REQ_FREE:
9954 case OCS_EVT_HW_PORT_REQ_ATTACH:
9955 if (data != NULL) {
9956 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9957 }
9958 /* fall through */
9959 default:
9960 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
9961 break;
9962 }
9963
9964 return 0;
9965 }
9966
9967 static void *
9968 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9969 {
9970 ocs_sli_port_t *sport = ctx->app;
9971 ocs_hw_t *hw = sport->hw;
9972
9973 smtrace("port");
9974
9975 switch (evt) {
9976 case OCS_EVT_ENTER:
9977 if (data != NULL) {
9978 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9979 }
9980 if (hw->callback.port != NULL) {
9981 hw->callback.port(hw->args.port,
9982 OCS_HW_PORT_FREE_FAIL, sport);
9983 }
9984 break;
9985 default:
9986 break;
9987 }
9988
9989 return NULL;
9990 }
9991
9992 static void *
9993 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9994 {
9995 ocs_sli_port_t *sport = ctx->app;
9996 ocs_hw_t *hw = sport->hw;
9997
9998 smtrace("port");
9999
10000 switch (evt) {
10001 case OCS_EVT_ENTER:
10002 /* free SLI resource */
10003 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
10004 ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10005 }
10006
10007 /* free mailbox buffer */
10008 if (data != NULL) {
10009 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10010 }
10011 if (hw->callback.port != NULL) {
10012 hw->callback.port(hw->args.port,
10013 OCS_HW_PORT_FREE_OK, sport);
10014 }
10015 break;
10016 default:
10017 break;
10018 }
10019
10020 return NULL;
10021 }
10022
10023 static void *
10024 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10025 {
10026 ocs_sli_port_t *sport = ctx->app;
10027 ocs_hw_t *hw = sport->hw;
10028
10029 smtrace("port");
10030
10031 switch (evt) {
10032 case OCS_EVT_ENTER:
10033 /* free SLI resource */
10034 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10035
10036 /* free mailbox buffer */
10037 if (data != NULL) {
10038 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10039 }
10040
10041 if (hw->callback.port != NULL) {
10042 hw->callback.port(hw->args.port,
10043 OCS_HW_PORT_ATTACH_FAIL, sport);
10044 }
10045 if (sport->sm_free_req_pending) {
10046 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10047 }
10048 break;
10049 default:
10050 __ocs_hw_port_common(__func__, ctx, evt, data);
10051 break;
10052 }
10053
10054 return NULL;
10055 }
10056
10057 static void *
10058 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10059 {
10060 ocs_sli_port_t *sport = ctx->app;
10061 ocs_hw_t *hw = sport->hw;
10062 uint8_t *cmd = NULL;
10063
10064 smtrace("port");
10065
10066 switch (evt) {
10067 case OCS_EVT_ENTER:
10068 /* allocate memory and send unreg_vpi */
10069 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10070 if (!cmd) {
10071 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10072 break;
10073 }
10074
10075 if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10076 SLI4_UNREG_TYPE_PORT)) {
10077 ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10078 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10079 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10080 break;
10081 }
10082
10083 if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10084 ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10085 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10086 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10087 break;
10088 }
10089 break;
10090 case OCS_EVT_RESPONSE:
10091 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10092 break;
10093 case OCS_EVT_ERROR:
10094 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10095 break;
10096 default:
10097 __ocs_hw_port_common(__func__, ctx, evt, data);
10098 break;
10099 }
10100
10101 return NULL;
10102 }
10103
10104 static void *
10105 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10106 {
10107 ocs_sli_port_t *sport = ctx->app;
10108 ocs_hw_t *hw = sport->hw;
10109
10110 smtrace("port");
10111
10112 switch (evt) {
10113 case OCS_EVT_ENTER:
10114 /* Forward to execute in mailbox completion processing context */
10115 if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10116 ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10117 }
10118 break;
10119 case OCS_EVT_RESPONSE:
10120 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10121 break;
10122 case OCS_EVT_ERROR:
10123 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10124 break;
10125 default:
10126 break;
10127 }
10128
10129 return NULL;
10130 }
10131
10132 static void *
10133 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10134 {
10135 ocs_sli_port_t *sport = ctx->app;
10136 ocs_hw_t *hw = sport->hw;
10137
10138 smtrace("port");
10139
10140 switch (evt) {
10141 case OCS_EVT_ENTER:
10142 if (data != NULL) {
10143 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10144 }
10145 if (hw->callback.port != NULL) {
10146 hw->callback.port(hw->args.port,
10147 OCS_HW_PORT_ATTACH_OK, sport);
10148 }
10149 if (sport->sm_free_req_pending) {
10150 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10151 }
10152 break;
10153 case OCS_EVT_HW_PORT_REQ_FREE:
10154 /* virtual/physical port request free */
10155 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10156 break;
10157 default:
10158 __ocs_hw_port_common(__func__, ctx, evt, data);
10159 break;
10160 }
10161
10162 return NULL;
10163 }
10164
10165 static void *
10166 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10167 {
10168 ocs_sli_port_t *sport = ctx->app;
10169 ocs_hw_t *hw = sport->hw;
10170
10171 smtrace("port");
10172
10173 switch (evt) {
10174 case OCS_EVT_ENTER:
10175 if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10176 ocs_log_err(hw->os, "REG_VPI format failure\n");
10177 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10178 break;
10179 }
10180
10181 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10182 ocs_log_err(hw->os, "REG_VPI command failure\n");
10183 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10184 break;
10185 }
10186 break;
10187 case OCS_EVT_RESPONSE:
10188 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10189 break;
10190 case OCS_EVT_ERROR:
10191 ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10192 break;
10193 case OCS_EVT_HW_PORT_REQ_FREE:
10194 /* Wait for attach response and then free */
10195 sport->sm_free_req_pending = 1;
10196 break;
10197 default:
10198 __ocs_hw_port_common(__func__, ctx, evt, data);
10199 break;
10200 }
10201
10202 return NULL;
10203 }
10204
10205 static void *
10206 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10207 {
10208 ocs_sli_port_t *sport = ctx->app;
10209 ocs_hw_t *hw = sport->hw;
10210
10211 smtrace("port");
10212
10213 switch (evt) {
10214 case OCS_EVT_ENTER:
10215 /* free SLI resource */
10216 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10217
10218 /* free mailbox buffer */
10219 if (data != NULL) {
10220 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10221 }
10222 break;
10223 default:
10224 __ocs_hw_port_common(__func__, ctx, evt, data);
10225 break;
10226 }
10227
10228 return NULL;
10229 }
10230
10231 static void *
10232 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10233 {
10234 ocs_sli_port_t *sport = ctx->app;
10235 ocs_hw_t *hw = sport->hw;
10236
10237 smtrace("port");
10238
10239 switch (evt) {
10240 case OCS_EVT_ENTER:
10241 if (data != NULL) {
10242 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10243 }
10244 if (hw->callback.port != NULL) {
10245 hw->callback.port(hw->args.port,
10246 OCS_HW_PORT_ALLOC_OK, sport);
10247 }
10248 /* If there is a pending free request, then handle it now */
10249 if (sport->sm_free_req_pending) {
10250 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10251 }
10252 break;
10253 case OCS_EVT_HW_PORT_REQ_ATTACH:
10254 /* virtual port requests attach */
10255 ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10256 break;
10257 case OCS_EVT_HW_PORT_ATTACH_OK:
10258 /* physical port attached (as part of attaching domain) */
10259 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10260 break;
10261 case OCS_EVT_HW_PORT_REQ_FREE:
10262 /* virtual port request free */
10263 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10264 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10265 } else {
10266 /*
10267 * Note: BE3/Skyhawk will respond with a status of 0x20
10268 * unless the reg_vpi has been issued, so we can
10269 * skip the unreg_vpi for these adapters.
10270 *
10271 * Send a nop to make sure that free doesn't occur in
10272 * same context
10273 */
10274 ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10275 }
10276 break;
10277 default:
10278 __ocs_hw_port_common(__func__, ctx, evt, data);
10279 break;
10280 }
10281
10282 return NULL;
10283 }
10284
10285 static void *
10286 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10287 {
10288 ocs_sli_port_t *sport = ctx->app;
10289 ocs_hw_t *hw = sport->hw;
10290
10291 smtrace("port");
10292
10293 switch (evt) {
10294 case OCS_EVT_ENTER:
10295 /* free SLI resource */
10296 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10297
10298 /* free mailbox buffer */
10299 if (data != NULL) {
10300 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10301 }
10302
10303 if (hw->callback.port != NULL) {
10304 hw->callback.port(hw->args.port,
10305 OCS_HW_PORT_ALLOC_FAIL, sport);
10306 }
10307
10308 /* If there is a pending free request, then handle it now */
10309 if (sport->sm_free_req_pending) {
10310 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10311 }
10312 break;
10313 default:
10314 __ocs_hw_port_common(__func__, ctx, evt, data);
10315 break;
10316 }
10317
10318 return NULL;
10319 }
10320
10321 static void *
10322 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10323 {
10324 ocs_sli_port_t *sport = ctx->app;
10325 ocs_hw_t *hw = sport->hw;
10326 uint8_t *payload = NULL;
10327
10328 smtrace("port");
10329
10330 switch (evt) {
10331 case OCS_EVT_ENTER:
10332 /* allocate memory for the service parameters */
10333 if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10334 ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10335 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10336 break;
10337 }
10338
10339 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10340 &sport->dma, sport->indicator)) {
ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10342 ocs_dma_free(hw->os, &sport->dma);
10343 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10344 break;
10345 }
10346
10347 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10348 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10349 ocs_dma_free(hw->os, &sport->dma);
10350 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10351 break;
10352 }
10353 break;
10354 case OCS_EVT_RESPONSE:
10355 payload = sport->dma.virt;
10356
10357 ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10358
10359 ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10360 sizeof(sport->sli_wwpn));
10361 ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10362 sizeof(sport->sli_wwnn));
10363
10364 ocs_dma_free(hw->os, &sport->dma);
10365 ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10366 break;
10367 case OCS_EVT_ERROR:
10368 ocs_dma_free(hw->os, &sport->dma);
10369 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10370 break;
10371 case OCS_EVT_HW_PORT_REQ_FREE:
/* Defer the free until the pending command completes */
10373 sport->sm_free_req_pending = 1;
10374 break;
10375 case OCS_EVT_EXIT:
10376 break;
10377 default:
10378 __ocs_hw_port_common(__func__, ctx, evt, data);
10379 break;
10380 }
10381
10382 return NULL;
10383 }
10384
10385 static void *
10386 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10387 {
10388 ocs_sli_port_t *sport = ctx->app;
10389
10390 smtrace("port");
10391
10392 switch (evt) {
10393 case OCS_EVT_ENTER:
10394 /* no-op */
10395 break;
10396 case OCS_EVT_HW_PORT_ALLOC_OK:
10397 ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10398 break;
10399 case OCS_EVT_HW_PORT_ALLOC_FAIL:
10400 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10401 break;
10402 case OCS_EVT_HW_PORT_REQ_FREE:
/* Defer the free until the allocation completes */
10404 sport->sm_free_req_pending = 1;
10405 break;
10406 default:
10407 __ocs_hw_port_common(__func__, ctx, evt, data);
10408 break;
10409 }
10410
10411 return NULL;
10412 }
10413
10414 static void *
10415 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10416 {
10417 ocs_sli_port_t *sport = ctx->app;
10418 ocs_hw_t *hw = sport->hw;
10419
10420 smtrace("port");
10421
10422 switch (evt) {
10423 case OCS_EVT_ENTER:
10424 /* If there is a pending free request, then handle it now */
10425 if (sport->sm_free_req_pending) {
10426 ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10427 return NULL;
10428 }
10429
/* TODO XXX transitioning to done only works if this is called
 * directly from ocs_hw_port_alloc, BUT not if called from
 * read_sparm64. In the latter case, we actually want to go
 * through report_ok/fail.
 */
10435 if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10436 sport->indicator, sport->domain->indicator)) {
ocs_log_err(hw->os, "INIT_VPI format failure\n");
10438 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10439 break;
10440 }
10441
10442 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10443 ocs_log_err(hw->os, "INIT_VPI command failure\n");
10444 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10445 break;
10446 }
10447 break;
10448 case OCS_EVT_RESPONSE:
10449 ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10450 break;
10451 case OCS_EVT_ERROR:
10452 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10453 break;
10454 case OCS_EVT_HW_PORT_REQ_FREE:
/* Defer the free until the pending command completes */
10456 sport->sm_free_req_pending = 1;
10457 break;
10458 case OCS_EVT_EXIT:
10459 break;
10460 default:
10461 __ocs_hw_port_common(__func__, ctx, evt, data);
10462 break;
10463 }
10464
10465 return NULL;
10466 }
10467
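/* Common mailbox completion callback for the port state machine: maps the
 * completion status to OCS_EVT_RESPONSE or OCS_EVT_ERROR and posts the
 * event, passing the MQE as event data.
 */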
10468 static int32_t
10469 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10470 {
10471 ocs_sli_port_t *sport = arg;
10472 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10473 ocs_sm_event_t evt;
10474
10475 if (status || hdr->status) {
10476 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10477 sport->indicator, status, hdr->status);
10478 evt = OCS_EVT_ERROR;
10479 } else {
10480 evt = OCS_EVT_RESPONSE;
10481 }
10482
10483 ocs_sm_post_event(&sport->ctx, evt, mqe);
10484
10485 return 0;
10486 }
10487
10488 static int32_t
10489 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10490 {
10491 ocs_sli_port_t *sport = arg;
10492 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10493 ocs_sm_event_t evt;
10494 uint8_t *mqecpy;
10495
10496 if (status || hdr->status) {
10497 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10498 sport->indicator, status, hdr->status);
10499 evt = OCS_EVT_ERROR;
10500 } else {
10501 evt = OCS_EVT_RESPONSE;
10502 }
10503
10504 /*
10505 * In this case we have to malloc a mailbox command buffer, as it is reused
10506 * in the state machine post event call, and eventually freed
10507 */
10508 mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10509 if (mqecpy == NULL) {
10510 ocs_log_err(hw->os, "malloc mqecpy failed\n");
10511 return -1;
10512 }
10513 ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10514
10515 ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10516
10517 return 0;
10518 }
10519
10520 /***************************************************************************
10521 * Domain state machine
10522 */
10523
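/* Default event handler for the domain state machine: logs unhandled events. */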
10524 static int32_t
10525 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10526 {
10527 ocs_domain_t *domain = ctx->app;
10528 ocs_hw_t *hw = domain->hw;
10529
10530 smtrace("domain");
10531
10532 switch (evt) {
10533 case OCS_EVT_EXIT:
10534 /* ignore */
10535 break;
10536
10537 default:
10538 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10539 break;
10540 }
10541
10542 return 0;
10543 }
10544
10545 static void *
10546 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10547 {
10548 ocs_domain_t *domain = ctx->app;
10549 ocs_hw_t *hw = domain->hw;
10550
10551 smtrace("domain");
10552
10553 switch (evt) {
10554 case OCS_EVT_ENTER:
10555 /* free command buffer */
10556 if (data != NULL) {
10557 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10558 }
10559 /* free SLI resources */
10560 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10561 /* TODO how to free FCFI (or do we at all)? */
10562
10563 if (hw->callback.domain != NULL) {
10564 hw->callback.domain(hw->args.domain,
10565 OCS_HW_DOMAIN_ALLOC_FAIL,
10566 domain);
10567 }
10568 break;
10569 default:
10570 __ocs_hw_domain_common(__func__, ctx, evt, data);
10571 break;
10572 }
10573
10574 return NULL;
10575 }
10576
10577 static void *
10578 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10579 {
10580 ocs_domain_t *domain = ctx->app;
10581 ocs_hw_t *hw = domain->hw;
10582
10583 smtrace("domain");
10584
10585 switch (evt) {
10586 case OCS_EVT_ENTER:
10587 /* free mailbox buffer and send alloc ok to physical sport */
10588 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10589 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10590
10591 /* now inform registered callbacks */
10592 if (hw->callback.domain != NULL) {
10593 hw->callback.domain(hw->args.domain,
10594 OCS_HW_DOMAIN_ATTACH_OK,
10595 domain);
10596 }
10597 break;
10598 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10599 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10600 break;
10601 default:
10602 __ocs_hw_domain_common(__func__, ctx, evt, data);
10603 break;
10604 }
10605
10606 return NULL;
10607 }
10608
10609 static void *
10610 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10611 {
10612 ocs_domain_t *domain = ctx->app;
10613 ocs_hw_t *hw = domain->hw;
10614
10615 smtrace("domain");
10616
10617 switch (evt) {
10618 case OCS_EVT_ENTER:
10619 if (data != NULL) {
10620 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10621 }
10622 /* free SLI resources */
10623 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10624 /* TODO how to free FCFI (or do we at all)? */
10625
10626 if (hw->callback.domain != NULL) {
10627 hw->callback.domain(hw->args.domain,
10628 OCS_HW_DOMAIN_ATTACH_FAIL,
10629 domain);
10630 }
10631 break;
10632 case OCS_EVT_EXIT:
10633 break;
10634 default:
10635 __ocs_hw_domain_common(__func__, ctx, evt, data);
10636 break;
10637 }
10638
10639 return NULL;
10640 }
10641
10642 static void *
10643 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10644 {
10645 ocs_domain_t *domain = ctx->app;
10646 ocs_hw_t *hw = domain->hw;
10647
10648 smtrace("domain");
10649
10650 switch (evt) {
10651 case OCS_EVT_ENTER:
10652
ocs_display_sparams("", "reg vfi", 0, NULL, domain->dma.virt);
10654
10655 if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10656 ocs_log_err(hw->os, "REG_VFI format failure\n");
10657 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10658 break;
10659 }
10660
10661 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10662 ocs_log_err(hw->os, "REG_VFI command failure\n");
10663 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10664 break;
10665 }
10666 break;
10667 case OCS_EVT_RESPONSE:
10668 ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10669 break;
10670 case OCS_EVT_ERROR:
10671 ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10672 break;
10673 default:
10674 __ocs_hw_domain_common(__func__, ctx, evt, data);
10675 break;
10676 }
10677
10678 return NULL;
10679 }
10680
10681 static void *
10682 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10683 {
10684 ocs_domain_t *domain = ctx->app;
10685 ocs_hw_t *hw = domain->hw;
10686
10687 smtrace("domain");
10688
10689 switch (evt) {
10690 case OCS_EVT_ENTER:
10691 /* free mailbox buffer and send alloc ok to physical sport */
10692 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10693 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10694
10695 ocs_hw_domain_add(hw, domain);
10696
10697 /* now inform registered callbacks */
10698 if (hw->callback.domain != NULL) {
10699 hw->callback.domain(hw->args.domain,
10700 OCS_HW_DOMAIN_ALLOC_OK,
10701 domain);
10702 }
10703 break;
10704 case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10705 ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10706 break;
10707 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10708 /* unreg_fcfi/vfi */
10709 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10710 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10711 } else {
10712 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10713 }
10714 break;
10715 default:
10716 __ocs_hw_domain_common(__func__, ctx, evt, data);
10717 break;
10718 }
10719
10720 return NULL;
10721 }
10722
10723 static void *
10724 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10725 {
10726 ocs_domain_t *domain = ctx->app;
10727 ocs_hw_t *hw = domain->hw;
10728
10729 smtrace("domain");
10730
10731 switch (evt) {
10732 case OCS_EVT_ENTER:
10733 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10734 &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10735 ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10736 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10737 break;
10738 }
10739
10740 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10741 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10742 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10743 break;
10744 }
10745 break;
10746 case OCS_EVT_EXIT:
10747 break;
10748 case OCS_EVT_RESPONSE:
10749 ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10750
10751 ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10752 break;
10753 case OCS_EVT_ERROR:
10754 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10755 break;
10756 default:
10757 __ocs_hw_domain_common(__func__, ctx, evt, data);
10758 break;
10759 }
10760
10761 return NULL;
10762 }
10763
10764 static void *
10765 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10766 {
10767 ocs_domain_t *domain = ctx->app;
10768 ocs_sli_port_t *sport = domain->sport;
10769 ocs_hw_t *hw = domain->hw;
10770
10771 smtrace("domain");
10772
10773 switch (evt) {
10774 case OCS_EVT_ENTER:
10775 if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10776 domain->fcf_indicator, sport->indicator)) {
10777 ocs_log_err(hw->os, "INIT_VFI format failure\n");
10778 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10779 break;
10780 }
10781 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10782 ocs_log_err(hw->os, "INIT_VFI command failure\n");
10783 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10784 break;
10785 }
10786 break;
10787 case OCS_EVT_EXIT:
10788 break;
10789 case OCS_EVT_RESPONSE:
10790 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10791 break;
10792 case OCS_EVT_ERROR:
10793 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10794 break;
10795 default:
10796 __ocs_hw_domain_common(__func__, ctx, evt, data);
10797 break;
10798 }
10799
10800 return NULL;
10801 }
10802
10803 static void *
10804 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10805 {
10806 ocs_domain_t *domain = ctx->app;
10807 ocs_hw_t *hw = domain->hw;
10808
10809 smtrace("domain");
10810
10811 switch (evt) {
10812 case OCS_EVT_ENTER: {
10813 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10814 uint32_t i;
10815
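		/*
		 * Each 32-bit filter_def word packs an RQ filter as four bytes:
		 * bits 7:0 = R_CTL mask, 15:8 = R_CTL match, 23:16 = TYPE mask,
		 * 31:24 = TYPE match. For example, a (hypothetical) filter_def of
		 * 0x01ff0806 unpacks to r_ctl_mask 0x06, r_ctl_match 0x08,
		 * type_mask 0xff, type_match 0x01.
		 */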
10816 /* Set the filter match/mask values from hw's filter_def values */
10817 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10818 rq_cfg[i].rq_id = 0xffff;
10819 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10820 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10821 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10822 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10823 }
10824
10825 /* Set the rq_id for each, in order of RQ definition */
10826 for (i = 0; i < hw->hw_rq_count; i++) {
10827 if (i >= ARRAY_SIZE(rq_cfg)) {
10828 ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10829 break;
10830 }
10831 rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10832 }
10833
10834 if (!data) {
10835 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10836 break;
10837 }
10838
10839 if (hw->hw_mrq_count) {
10840 if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10841 domain->vlan_id, domain->fcf)) {
10842 ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10843 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10844 break;
10845 }
10846
10847 } else {
10848 if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10849 rq_cfg, domain->vlan_id)) {
10850 ocs_log_err(hw->os, "REG_FCFI format failure\n");
10851 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10852 break;
10853 }
10854 }
10855
10856 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10857 ocs_log_err(hw->os, "REG_FCFI command failure\n");
10858 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10859 break;
10860 }
10861 break;
10862 }
10863 case OCS_EVT_EXIT:
10864 break;
10865 case OCS_EVT_RESPONSE:
10866 if (!data) {
10867 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10868 break;
10869 }
10870
10871 domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10872
10873 /*
10874 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10875 * and instead rely on implicit initialization during VFI registration.
10876 * Short circuit normal processing here for those devices.
10877 */
10878 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10879 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10880 } else {
10881 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10882 }
10883 break;
10884 case OCS_EVT_ERROR:
10885 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10886 break;
10887 default:
10888 __ocs_hw_domain_common(__func__, ctx, evt, data);
10889 break;
10890 }
10891
10892 return NULL;
10893 }
10894
10895 static void *
10896 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10897 {
10898 ocs_domain_t *domain = ctx->app;
10899 ocs_hw_t *hw = domain->hw;
10900
10901 smtrace("domain");
10902
10903 switch (evt) {
10904 case OCS_EVT_ENTER:
10905 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10906 /*
			 * For FC, the HW already registered an FCFI.
			 * Copy the FCF information into the domain and jump to INIT_VFI.
10909 */
10910 domain->fcf_indicator = hw->fcf_indicator;
10911 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10912 } else {
10913 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10914 }
10915 break;
10916 default:
10917 __ocs_hw_domain_common(__func__, ctx, evt, data);
10918 break;
10919 }
10920
10921 return NULL;
10922 }
10923
10924 static void *
10925 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10926 {
10927 ocs_domain_t *domain = ctx->app;
10928
10929 smtrace("domain");
10930
10931 switch (evt) {
10932 case OCS_EVT_ENTER:
10933 if (domain != NULL) {
10934 ocs_hw_t *hw = domain->hw;
10935
10936 ocs_hw_domain_del(hw, domain);
10937
10938 if (hw->callback.domain != NULL) {
10939 hw->callback.domain(hw->args.domain,
10940 OCS_HW_DOMAIN_FREE_FAIL,
10941 domain);
10942 }
10943 }
10944
10945 /* free command buffer */
10946 if (data != NULL) {
10947 ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10948 }
10949 break;
10950 case OCS_EVT_EXIT:
10951 break;
10952 default:
10953 __ocs_hw_domain_common(__func__, ctx, evt, data);
10954 break;
10955 }
10956
10957 return NULL;
10958 }
10959
10960 static void *
10961 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10962 {
10963 ocs_domain_t *domain = ctx->app;
10964
10965 smtrace("domain");
10966
10967 switch (evt) {
10968 case OCS_EVT_ENTER:
10969 /* Free DMA and mailbox buffer */
10970 if (domain != NULL) {
10971 ocs_hw_t *hw = domain->hw;
10972
10973 /* free VFI resource */
10974 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10975 domain->indicator);
10976
10977 ocs_hw_domain_del(hw, domain);
10978
10979 /* inform registered callbacks */
10980 if (hw->callback.domain != NULL) {
10981 hw->callback.domain(hw->args.domain,
10982 OCS_HW_DOMAIN_FREE_OK,
10983 domain);
10984 }
10985 }
10986 if (data != NULL) {
10987 ocs_free(NULL, data, SLI4_BMBX_SIZE);
10988 }
10989 break;
10990 case OCS_EVT_EXIT:
10991 break;
10992 default:
10993 __ocs_hw_domain_common(__func__, ctx, evt, data);
10994 break;
10995 }
10996
10997 return NULL;
10998 }
10999
11000 static void *
11001 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11002 {
11003 ocs_domain_t *domain = ctx->app;
11004 ocs_hw_t *hw = domain->hw;
11005
11006 smtrace("domain");
11007
11008 switch (evt) {
11009 case OCS_EVT_ENTER:
11010 /* if we're in the middle of a teardown, skip sending rediscover */
11011 if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11012 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11013 break;
11014 }
11015 if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11016 ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11017 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11018 break;
11019 }
11020
11021 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11022 ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11023 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11024 }
11025 break;
11026 case OCS_EVT_RESPONSE:
11027 case OCS_EVT_ERROR:
11028 /* REDISCOVER_FCF can fail if none exist */
11029 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11030 break;
11031 case OCS_EVT_EXIT:
11032 break;
11033 default:
11034 __ocs_hw_domain_common(__func__, ctx, evt, data);
11035 break;
11036 }
11037
11038 return NULL;
11039 }
11040
11041 static void *
11042 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11043 {
11044 ocs_domain_t *domain = ctx->app;
11045 ocs_hw_t *hw = domain->hw;
11046
11047 smtrace("domain");
11048
11049 switch (evt) {
11050 case OCS_EVT_ENTER:
11051 if (data == NULL) {
11052 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11053 if (!data) {
11054 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11055 break;
11056 }
11057 }
11058
11059 if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11060 ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11061 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11062 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11063 break;
11064 }
11065
11066 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11067 ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11068 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11069 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11070 break;
11071 }
11072 break;
11073 case OCS_EVT_RESPONSE:
11074 if (domain->req_rediscover_fcf) {
11075 domain->req_rediscover_fcf = FALSE;
11076 ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11077 } else {
11078 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11079 }
11080 break;
11081 case OCS_EVT_ERROR:
11082 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11083 break;
11084 case OCS_EVT_EXIT:
11085 break;
11086 default:
11087 __ocs_hw_domain_common(__func__, ctx, evt, data);
11088 break;
11089 }
11090
11091 return NULL;
11092 }
11093
11094 static void *
11095 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11096 {
11097 ocs_domain_t *domain = ctx->app;
11098 ocs_hw_t *hw = domain->hw;
11099 uint8_t is_fc = FALSE;
11100
11101 smtrace("domain");
11102
11103 is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11104
11105 switch (evt) {
11106 case OCS_EVT_ENTER:
11107 if (data == NULL) {
11108 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11109 if (!data) {
11110 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11111 break;
11112 }
11113 }
11114
11115 if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11116 SLI4_UNREG_TYPE_DOMAIN)) {
11117 ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11118 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11119 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11120 break;
11121 }
11122
11123 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11124 ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11125 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11126 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11127 break;
11128 }
11129 break;
11130 case OCS_EVT_ERROR:
11131 if (is_fc) {
11132 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11133 } else {
11134 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11135 }
11136 break;
11137 case OCS_EVT_RESPONSE:
11138 if (is_fc) {
11139 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11140 } else {
11141 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11142 }
11143 break;
11144 default:
11145 __ocs_hw_domain_common(__func__, ctx, evt, data);
11146 break;
11147 }
11148
11149 return NULL;
11150 }
11151
11152 /* callback for domain alloc/attach/free */
11153 static int32_t
11154 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11155 {
11156 ocs_domain_t *domain = arg;
11157 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11158 ocs_sm_event_t evt;
11159
11160 if (status || hdr->status) {
11161 ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11162 domain->indicator, status, hdr->status);
11163 evt = OCS_EVT_ERROR;
11164 } else {
11165 evt = OCS_EVT_RESPONSE;
11166 }
11167
11168 ocs_sm_post_event(&domain->sm, evt, mqe);
11169
11170 return 0;
11171 }
11172
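/**
 * @brief Scan for timed-out target WQEs.
 *
 * @par Description
 * Runs in the mailbox completion context as the NOP completion posted by
 * target_wqe_timer_cb(). Walks the timed WQE list, aborts any target I/O whose
 * elapsed time exceeds its timeout, and re-arms the timer unless a shutdown is
 * in progress.
 *
 * @param hw Hardware context.
 * @param status Completion status.
 * @param mqe Pointer to mailbox completion queue entry.
 * @param arg Caller-provided argument (unused).
 *
 * @return Returns 0.
 */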
11173 static int32_t
11174 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11175 {
11176 ocs_hw_io_t *io = NULL;
11177 ocs_hw_io_t *io_next = NULL;
11178 uint64_t ticks_current = ocs_get_os_ticks();
11179 uint32_t sec_elapsed;
11180 ocs_hw_rtn_e rc;
11181
11182 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11183
11184 if (status || hdr->status) {
11185 ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11186 status, hdr->status);
11187 /* go ahead and proceed with wqe timer checks... */
11188 }
11189
11190 /* loop through active WQE list and check for timeouts */
11191 ocs_lock(&hw->io_lock);
11192 ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11193 sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11194
11195 /*
11196 * If elapsed time > timeout, abort it. No need to check type since
11197 * it wouldn't be on this list unless it was a target WQE
11198 */
11199 if (sec_elapsed > io->tgt_wqe_timeout) {
11200 ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11201 io->indicator, io->reqtag, io->type);
11202
			/* remove from io_timed_wqe list so we won't try to abort it again */
11204 ocs_list_remove(&hw->io_timed_wqe, io);
11205
11206 /* save status of "timed out" for when abort completes */
11207 io->status_saved = 1;
11208 io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11209 io->saved_ext = 0;
11210 io->saved_len = 0;
11211
11212 /* now abort outstanding IO */
11213 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11214 if (rc) {
11215 ocs_log_test(hw->os,
11216 "abort failed xri=%#x tag=%#x rc=%d\n",
11217 io->indicator, io->reqtag, rc);
11218 }
11219 }
11220 /*
11221 * need to go through entire list since each IO could have a
11222 * different timeout value
11223 */
11224 }
11225 ocs_unlock(&hw->io_lock);
11226
11227 /* if we're not in the middle of shutting down, schedule next timer */
11228 if (!hw->active_wqe_timer_shutdown) {
11229 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11230 }
11231 hw->in_active_wqe_timer = FALSE;
11232 return 0;
11233 }
11234
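/**
 * @brief Target WQE timer handler.
 *
 * @par Description
 * Deletes the running timer and forwards the timeout scan to the mailbox
 * completion processing context via ocs_hw_async_call(); the scan itself
 * re-arms the timer.
 *
 * @param arg Pointer to HW object.
 *
 * @return None.
 */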
11235 static void
11236 target_wqe_timer_cb(void *arg)
11237 {
11238 ocs_hw_t *hw = (ocs_hw_t *)arg;
11239
11240 /* delete existing timer; will kick off new timer after checking wqe timeouts */
11241 hw->in_active_wqe_timer = TRUE;
11242 ocs_del_timer(&hw->wqe_timer);
11243
11244 /* Forward timer callback to execute in the mailbox completion processing context */
11245 if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11246 ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11247 }
11248 }
11249
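/**
 * @brief Shut down the target WQE timer.
 *
 * @par Description
 * Requests shutdown of the active WQE timer, then flushes completions until
 * any in-flight timer handler finishes, bounded by a fixed number of
 * iterations.
 *
 * @param hw Pointer to HW object.
 *
 * @return None.
 */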
11250 static void
11251 shutdown_target_wqe_timer(ocs_hw_t *hw)
11252 {
11253 uint32_t iters = 100;
11254
11255 if (hw->config.emulate_tgt_wqe_timeout) {
11256 /* request active wqe timer shutdown, then wait for it to complete */
11257 hw->active_wqe_timer_shutdown = TRUE;
11258
11259 /* delete WQE timer and wait for timer handler to complete (if necessary) */
11260 ocs_del_timer(&hw->wqe_timer);
11261
11262 /* now wait for timer handler to complete (if necessary) */
11263 while (hw->in_active_wqe_timer && iters) {
11264 /*
11265 * if we happen to have just sent NOP mailbox command, make sure
11266 * completions are being processed
11267 */
11268 ocs_hw_flush(hw);
11269 iters--;
11270 }
11271
11272 if (iters == 0) {
11273 ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
11274 }
11275 }
11276 }
11277
11278 /**
11279 * @brief Determine if HW IO is owned by the port.
11280 *
11281 * @par Description
11282 * Determines if the given HW IO has been posted to the chip.
11283 *
11284 * @param hw Hardware context allocated by the caller.
11285 * @param io HW IO.
11286 *
11287 * @return Returns TRUE if given HW IO is port-owned.
11288 */
11289 uint8_t
11290 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11291 {
11292 /* Check to see if this is a port owned XRI */
11293 return io->is_port_owned;
11294 }
11295
11296 /**
11297 * @brief Return TRUE if exchange is port-owned.
11298 *
11299 * @par Description
11300 * Test to see if the xri is a port-owned xri.
11301 *
11302 * @param hw Hardware context.
11303 * @param xri Exchange indicator.
11304 *
11305 * @return Returns TRUE if XRI is a port owned XRI.
11306 */
11307
11308 uint8_t
11309 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11310 {
11311 ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11312 return (io == NULL ? FALSE : io->is_port_owned);
11313 }
11314
11315 /**
 * @brief Return XRIs from the port-owned list to the host.
11317 *
11318 * @par Description
11319 * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11320 *
11321 * @param hw Hardware context.
11322 * @param xri_base The starting XRI number.
11323 * @param xri_count The number of XRIs to free from the base.
11324 */
11325 static void
11326 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11327 {
11328 ocs_hw_io_t *io;
11329 uint32_t i;
11330
11331 for (i = 0; i < xri_count; i++) {
11332 io = ocs_hw_io_lookup(hw, xri_base + i);
11333
11334 /*
11335 * if this is an auto xfer rdy XRI, then we need to release any
11336 * buffer attached to the XRI before moving the XRI back to the free pool.
11337 */
11338 if (hw->auto_xfer_rdy_enabled) {
11339 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11340 }
11341
11342 ocs_lock(&hw->io_lock);
11343 ocs_list_remove(&hw->io_port_owned, io);
11344 io->is_port_owned = 0;
11345 ocs_list_add_tail(&hw->io_free, io);
11346 ocs_unlock(&hw->io_lock);
11347 }
11348 }
11349
11350 /**
11351 * @brief Called when the POST_XRI command completes.
11352 *
11353 * @par Description
11354 * Free the mailbox command buffer and reclaim the XRIs on failure.
11355 *
11356 * @param hw Hardware context.
11357 * @param status Status field from the mbox completion.
11358 * @param mqe Mailbox response structure.
 * @param arg Caller-provided argument (unused).
11360 *
11361 * @return Returns 0.
11362 */
11363 static int32_t
11364 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11365 {
11366 sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe;
11367
11368 /* Reclaim the XRIs as host owned if the command fails */
11369 if (status != 0) {
		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
11371 status, post_xri->xri_base, post_xri->xri_count);
11372 ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11373 }
11374
11375 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11376 return 0;
11377 }
11378
11379 /**
11380 * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11381 *
11382 * @param hw Hardware context.
11383 * @param xri_start The starting XRI to post.
11384 * @param num_to_post The number of XRIs to post.
11385 *
11386 * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11387 */
11388
11389 static ocs_hw_rtn_e
11390 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11391 {
11392 uint8_t *post_xri;
11393 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11394
	/* the mailbox queue requires a dynamically-allocated buffer, so always allocate */
11396 post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11397 if (post_xri == NULL) {
11398 ocs_log_err(hw->os, "no buffer for command\n");
11399 return OCS_HW_RTN_NO_MEMORY;
11400 }
11401
11402 /* Register the XRIs */
11403 if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11404 xri_start, num_to_post)) {
11405 rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11406 if (rc != OCS_HW_RTN_SUCCESS) {
11407 ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11408 ocs_log_err(hw->os, "post_xri failed\n");
11409 }
11410 }
11411 return rc;
11412 }
11413
11414 /**
11415 * @brief Move XRIs from the host-controlled pool to the port.
11416 *
11417 * @par Description
11418 * Removes IOs from the free list and moves them to the port.
11419 *
11420 * @param hw Hardware context.
11421 * @param num_xri The number of XRIs being requested to move to the chip.
11422 *
11423 * @return Returns the number of XRIs that were moved.
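 *
 * @par Example
 * @code
 * // sketch: request 16 XRIs; fewer may be moved if the free list runs dry
 * uint32_t posted = ocs_hw_xri_move_to_port_owned(hw, 16);
 * @endcode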
11424 */
11425
11426 uint32_t
11427 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11428 {
11429 ocs_hw_io_t *io;
11430 uint32_t i;
11431 uint32_t num_posted = 0;
11432
11433 /*
11434 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11435 * IO on the io_inuse list. We need to move from the io_free to
11436 * the io_port_owned list.
11437 */
11438 ocs_lock(&hw->io_lock);
11439
11440 for (i = 0; i < num_xri; i++) {
11441 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11442 ocs_hw_rtn_e rc;
11443
11444 /*
11445 * if this is an auto xfer rdy XRI, then we need to attach a
11446 * buffer to the XRI before submitting it to the chip. If a
11447 * buffer is unavailable, then we cannot post it, so return it
11448 * to the free pool.
11449 */
11450 if (hw->auto_xfer_rdy_enabled) {
11451 /* Note: uses the IO lock to get the auto xfer rdy buffer */
11452 ocs_unlock(&hw->io_lock);
11453 rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11454 ocs_lock(&hw->io_lock);
11455 if (rc != OCS_HW_RTN_SUCCESS) {
11456 ocs_list_add_head(&hw->io_free, io);
11457 break;
11458 }
11459 }
11460 ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11461 io->is_port_owned = 1;
11462 ocs_list_add_tail(&hw->io_port_owned, io);
11463
11464 /* Post XRI */
11465 if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11466 ocs_hw_reclaim_xri(hw, io->indicator, i);
11467 break;
11468 }
11469 num_posted++;
11470 } else {
11471 /* no more free XRIs */
11472 break;
11473 }
11474 }
11475 ocs_unlock(&hw->io_lock);
11476
11477 return num_posted;
11478 }
11479
11480 /**
11481 * @brief Called when the RELEASE_XRI command completes.
11482 *
11483 * @par Description
11484 * Move the IOs back to the free pool on success.
11485 *
11486 * @param hw Hardware context.
11487 * @param status Status field from the mbox completion.
11488 * @param mqe Mailbox response structure.
 * @param arg Caller-provided argument (unused).
11490 *
11491 * @return Returns 0.
11492 */
11493 static int32_t
11494 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11495 {
11496 sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe;
11497 uint8_t i;
11498
11499 /* Reclaim the XRIs as host owned if the command fails */
11500 if (status != 0) {
11501 ocs_log_err(hw->os, "Status 0x%x\n", status);
11502 } else {
11503 for (i = 0; i < release_xri->released_xri_count; i++) {
11504 uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11505 release_xri->xri_tbl[i/2].xri_tag1);
11506 ocs_hw_reclaim_xri(hw, xri, 1);
11507 }
11508 }
11509
11510 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11511 return 0;
11512 }
11513
11514 /**
11515 * @brief Move XRIs from the port-controlled pool to the host.
11516 *
11517 * Requests XRIs from the FW to return to the host-owned pool.
11518 *
11519 * @param hw Hardware context.
 * @param num_xri The number of XRIs requested to be moved from the chip.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
11523 */
11524
11525 ocs_hw_rtn_e
11526 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11527 {
11528 uint8_t *release_xri;
11529 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11530
11531 /* non-local buffer required for mailbox queue */
11532 release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11533 if (release_xri == NULL) {
11534 ocs_log_err(hw->os, "no buffer for command\n");
11535 return OCS_HW_RTN_NO_MEMORY;
11536 }
11537
11538 /* release the XRIs */
11539 if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11540 rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11541 if (rc != OCS_HW_RTN_SUCCESS) {
11542 ocs_log_err(hw->os, "release_xri failed\n");
11543 }
11544 }
	/* if the command was not issued successfully, free the mailbox buffer */
	if (rc != OCS_HW_RTN_SUCCESS) {
11547 ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11548 }
11549 return rc;
11550 }
11551
11552 /**
 * @brief Allocate an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
11557 *
11558 * @param hw Pointer to HW object.
11559 * @param rqindex RQ index for this buffer.
11560 * @param count Count of buffers in array.
11561 * @param size Size of buffer.
11562 *
11563 * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11564 */
11565 static ocs_hw_rq_buffer_t *
11566 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11567 {
11568 ocs_t *ocs = hw->os;
11569 ocs_hw_rq_buffer_t *rq_buf = NULL;
11570 ocs_hw_rq_buffer_t *prq;
11571 uint32_t i;
11572
11573 if (count != 0) {
11574 rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11575 if (rq_buf == NULL) {
11576 ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11577 return NULL;
11578 }
11579
		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11581 prq->rqindex = rqindex;
			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
				uint32_t j;

				ocs_log_err(hw->os, "DMA allocation failed\n");
				/* free any DMA buffers already allocated, then the tracker array */
				for (j = 0; j < i; j++) {
					ocs_dma_free(ocs, &rq_buf[j].dma);
				}
				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
				rq_buf = NULL;
				break;
			}
11588 }
11589 }
11590 return rq_buf;
11591 }
11592
11593 /**
 * @brief Free an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * The ocs_hw_rq_buffer_t array is freed, along with allocated DMA memory.
 *
 * @param hw Pointer to HW object.
 * @param rq_buf Pointer to ocs_hw_rq_buffer_t array.
11601 * @param count Count of buffers in array.
11602 *
11603 * @return None.
11604 */
11605 static void
11606 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11607 {
11608 ocs_t *ocs = hw->os;
11609 uint32_t i;
11610 ocs_hw_rq_buffer_t *prq;
11611
11612 if (rq_buf != NULL) {
11613 for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11614 ocs_dma_free(ocs, &prq->dma);
11615 }
11616 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11617 }
11618 }
11619
11620 /**
11621 * @brief Allocate the RQ data buffers.
11622 *
11623 * @param hw Pointer to HW object.
11624 *
11625 * @return Returns 0 on success, or a non-zero value on failure.
11626 */
11627 ocs_hw_rtn_e
11628 ocs_hw_rx_allocate(ocs_hw_t *hw)
11629 {
11630 ocs_t *ocs = hw->os;
11631 uint32_t i;
11632 int32_t rc = OCS_HW_RTN_SUCCESS;
11633 uint32_t rqindex = 0;
11634 hw_rq_t *rq;
11635 uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11636 uint32_t payload_size = hw->config.rq_default_buffer_size;
11637
11638 rqindex = 0;
11639
11640 for (i = 0; i < hw->hw_rq_count; i++) {
11641 rq = hw->hw_rq[i];
11642
11643 /* Allocate header buffers */
11644 rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11645 if (rq->hdr_buf == NULL) {
11646 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11647 rc = OCS_HW_RTN_ERROR;
11648 break;
11649 }
11650
11651 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id,
11652 rq->entry_count, hdr_size);
11653
11654 rqindex++;
11655
11656 /* Allocate payload buffers */
11657 rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11658 if (rq->payload_buf == NULL) {
			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n");
11660 rc = OCS_HW_RTN_ERROR;
11661 break;
11662 }
11663 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11664 rq->entry_count, payload_size);
11665 rqindex++;
11666 }
11667
11668 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11669 }
11670
11671 /**
11672 * @brief Post the RQ data buffers to the chip.
11673 *
11674 * @param hw Pointer to HW object.
11675 *
11676 * @return Returns 0 on success, or a non-zero value on failure.
11677 */
11678 ocs_hw_rtn_e
11679 ocs_hw_rx_post(ocs_hw_t *hw)
11680 {
11681 uint32_t i;
11682 uint32_t idx;
11683 uint32_t rq_idx;
11684 int32_t rc = 0;
11685
11686 /*
11687 * In RQ pair mode, we MUST post the header and payload buffer at the
11688 * same time.
11689 */
11690 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11691 hw_rq_t *rq = hw->hw_rq[rq_idx];
11692
11693 for (i = 0; i < rq->entry_count-1; i++) {
11694 ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11695 ocs_hw_assert(seq != NULL);
11696
11697 seq->header = &rq->hdr_buf[i];
11698
11699 seq->payload = &rq->payload_buf[i];
11700
11701 rc = ocs_hw_sequence_free(hw, seq);
11702 if (rc) {
11703 break;
11704 }
11705 }
11706 if (rc) {
11707 break;
11708 }
11709 }
11710
11711 return rc;
11712 }
11713
11714 /**
11715 * @brief Free the RQ data buffers.
11716 *
11717 * @param hw Pointer to HW object.
11718 *
11719 */
11720 void
11721 ocs_hw_rx_free(ocs_hw_t *hw)
11722 {
11723 hw_rq_t *rq;
11724 uint32_t i;
11725
11726 /* Free hw_rq buffers */
11727 for (i = 0; i < hw->hw_rq_count; i++) {
11728 rq = hw->hw_rq[i];
11729 if (rq != NULL) {
11730 ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11731 rq->hdr_buf = NULL;
11732 ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11733 rq->payload_buf = NULL;
11734 }
11735 }
11736 }
11737
11738 /**
11739 * @brief HW async call context structure.
11740 */
11741 typedef struct {
11742 ocs_hw_async_cb_t callback;
11743 void *arg;
11744 uint8_t cmd[SLI4_BMBX_SIZE];
11745 } ocs_hw_async_call_ctx_t;
11746
11747 /**
11748 * @brief HW async callback handler
11749 *
11750 * @par Description
11751 * This function is called when the NOP mailbox command completes. The callback stored
11752 * in the requesting context is invoked.
11753 *
11754 * @param hw Pointer to HW object.
11755 * @param status Completion status.
11756 * @param mqe Pointer to mailbox completion queue entry.
11757 * @param arg Caller-provided argument.
11758 *
11759 * @return None.
11760 */
11761 static void
11762 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11763 {
11764 ocs_hw_async_call_ctx_t *ctx = arg;
11765
11766 if (ctx != NULL) {
11767 if (ctx->callback != NULL) {
11768 (*ctx->callback)(hw, status, mqe, ctx->arg);
11769 }
11770 ocs_free(hw->os, ctx, sizeof(*ctx));
11771 }
11772 }
11773
11774 /**
11775 * @brief Make an async callback using NOP mailbox command
11776 *
11777 * @par Description
11778 * Post a NOP mailbox command; the callback with argument is invoked upon completion
11779 * while in the event processing context.
11780 *
11781 * @param hw Pointer to HW object.
11782 * @param callback Pointer to callback function.
 * @param arg Caller-provided callback argument.
11784 *
11785 * @return Returns 0 on success, or a negative error code value on failure.
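 *
 * @par Example
 * A minimal usage sketch (my_cb and my_arg are hypothetical):
 * @code
 * if (ocs_hw_async_call(hw, my_cb, my_arg)) {
 *     ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
 * }
 * @endcode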
11786 */
11787 int32_t
11788 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11789 {
11790 ocs_hw_async_call_ctx_t *ctx;
11791
11792 /*
	 * Allocate a callback context (which includes the mailbox command buffer).
	 * It must be persistent because the mailbox command submission may be
	 * queued and executed later.
11796 */
11797 ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11798 if (ctx == NULL) {
11799 ocs_log_err(hw->os, "failed to malloc async call context\n");
11800 return OCS_HW_RTN_NO_MEMORY;
11801 }
11802 ctx->callback = callback;
11803 ctx->arg = arg;
11804
11805 /* Build and send a NOP mailbox command */
11806 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11807 ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11808 ocs_free(hw->os, ctx, sizeof(*ctx));
11809 return OCS_HW_RTN_ERROR;
11810 }
11811
11812 if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11813 ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11814 ocs_free(hw->os, ctx, sizeof(*ctx));
11815 return OCS_HW_RTN_ERROR;
11816 }
11817 return OCS_HW_RTN_SUCCESS;
11818 }
11819
11820 /**
11821 * @brief Initialize the reqtag pool.
11822 *
11823 * @par Description
11824 * The WQ request tag pool is initialized.
11825 *
11826 * @param hw Pointer to HW object.
11827 *
11828 * @return Returns 0 on success, or a negative error code value on failure.
11829 */
11830 ocs_hw_rtn_e
11831 ocs_hw_reqtag_init(ocs_hw_t *hw)
11832 {
11833 if (hw->wq_reqtag_pool == NULL) {
11834 hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11835 if (hw->wq_reqtag_pool == NULL) {
11836 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11837 return OCS_HW_RTN_NO_MEMORY;
11838 }
11839 }
11840 ocs_hw_reqtag_reset(hw);
11841 return OCS_HW_RTN_SUCCESS;
11842 }
11843
11844 /**
11845 * @brief Allocate a WQ request tag.
11846 *
11847 * Allocate and populate a WQ request tag from the WQ request tag pool.
11848 *
11849 * @param hw Pointer to HW object.
11850 * @param callback Callback function.
11851 * @param arg Pointer to callback argument.
11852 *
11853 * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
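 *
 * @par Example
 * Typical pairing (sketch; my_wqe_cb is a hypothetical completion handler):
 * @code
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_cb, io);
 * if (wqcb != NULL) {
 *     // wqcb->instance_index becomes the WQE request tag; the completion
 *     // path recovers the callback with ocs_hw_reqtag_get_instance()
 * }
 * @endcode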
11854 */
11855 hw_wq_callback_t *
11856 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11857 {
11858 hw_wq_callback_t *wqcb;
11859
11860 ocs_hw_assert(callback != NULL);
11861
11862 wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11863 if (wqcb != NULL) {
11864 ocs_hw_assert(wqcb->callback == NULL);
11865 wqcb->callback = callback;
11866 wqcb->arg = arg;
11867 }
11868 return wqcb;
11869 }
11870
11871 /**
11872 * @brief Free a WQ request tag.
11873 *
11874 * Free the passed in WQ request tag.
11875 *
11876 * @param hw Pointer to HW object.
11877 * @param wqcb Pointer to WQ request tag object to free.
11878 *
11879 * @return None.
11880 */
11881 void
11882 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11883 {
11884 ocs_hw_assert(wqcb->callback != NULL);
11885 wqcb->callback = NULL;
11886 wqcb->arg = NULL;
11887 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11888 }
11889
11890 /**
11891 * @brief Return WQ request tag by index.
11892 *
11893 * @par Description
11894 * Return pointer to WQ request tag object given an index.
11895 *
11896 * @param hw Pointer to HW object.
11897 * @param instance_index Index of WQ request tag to return.
11898 *
11899 * @return Pointer to WQ request tag, or NULL.
11900 */
11901 hw_wq_callback_t *
11902 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11903 {
11904 hw_wq_callback_t *wqcb;
11905
11906 wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11907 if (wqcb == NULL) {
11908 ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11909 }
11910 return wqcb;
11911 }
11912
11913 /**
11914 * @brief Reset the WQ request tag pool.
11915 *
11916 * @par Description
11917 * Reset the WQ request tag pool, returning all to the free list.
11918 *
11919 * @param hw pointer to HW object.
11920 *
11921 * @return None.
11922 */
11923 void
11924 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11925 {
11926 hw_wq_callback_t *wqcb;
11927 uint32_t i;
11928
11929 /* Remove all from freelist */
	while (ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11931 ;
11932 }
11933
11934 /* Put them all back */
11935 for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11936 wqcb->instance_index = i;
11937 wqcb->callback = NULL;
11938 wqcb->arg = NULL;
11939 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11940 }
11941 }
11942
11943 /**
11944 * @brief Handle HW assertion
11945 *
11946 * HW assert, display diagnostic message, and abort.
11947 *
11948 * @param cond string describing failing assertion condition
11949 * @param filename file name
11950 * @param linenum line number
11951 *
11952 * @return none
11953 */
11954 void
11955 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11956 {
11957 ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11958 ocs_abort();
11959 /* no return */
11960 }
11961
11962 /**
11963 * @brief Handle HW verify
11964 *
11965 * HW verify, display diagnostic message, dump stack and return.
11966 *
11967 * @param cond string describing failing verify condition
11968 * @param filename file name
11969 * @param linenum line number
11970 *
11971 * @return none
11972 */
11973 void
11974 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11975 {
11976 ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11977 ocs_print_stack();
11978 }
11979
11980 /**
 * @brief Requeue an XRI.
 *
 * @par Description
 * Requeue the XRI by reposting an auto xfer rdy buffer and submitting a
 * REQUEUE_XRI WQE.
 *
 * @param hw Pointer to HW object.
 * @param io Pointer to HW IO.
 *
 * @return Returns 0 on success, or -1 on failure.
11990 */
11991 int32_t
ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io)
11993 {
11994 int32_t rc = 0;
11995
11996 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
11997 if (rc) {
11998 ocs_list_add_tail(&hw->io_port_dnrx, io);
11999 rc = -1;
12000 goto exit_ocs_hw_reque_xri;
12001 }
12002
12003 io->auto_xfer_rdy_dnrx = 0;
12004 io->type = OCS_HW_IO_DNRX_REQUEUE;
12005 if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12006 /* Clear buffer from XRI */
12007 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12008 io->axr_buf = NULL;
12009
12010 ocs_log_err(hw->os, "requeue_xri WQE error\n");
12011 ocs_list_add_tail(&hw->io_port_dnrx, io);
12012
12013 rc = -1;
12014 goto exit_ocs_hw_reque_xri;
12015 }
12016
12017 if (io->wq == NULL) {
12018 io->wq = ocs_hw_queue_next_wq(hw, io);
12019 ocs_hw_assert(io->wq != NULL);
12020 }
12021
12022 /*
12023 * Add IO to active io wqe list before submitting, in case the
12024 * wcqe processing preempts this thread.
12025 */
12026 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12027 OCS_STAT(io->wq->use_count++);
12028
12029 rc = hw_wq_write(io->wq, &io->wqe);
12030 if (rc < 0) {
12031 ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12032 rc = -1;
12033 }
12034
12035 exit_ocs_hw_reque_xri:
	return rc;
12037 }
12038
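/**
 * @brief Read the default WWPN/WWNN for a channel.
 *
 * @par Description
 * Issues a READ_SPARM64 through the bootstrap mailbox (polled) and copies the
 * WWPN and WWNN out of the returned service parameters.
 *
 * @param ocs Pointer to the ocs structure.
 * @param chan Channel (VPI index) to query.
 * @param wwpn Pointer to the returned WWPN.
 * @param wwnn Pointer to the returned WWNN.
 *
 * @return Returns 0 on success, or 1 on failure.
 */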
12039 uint32_t
12040 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12041 {
12042 sli4_t *sli4 = &ocs->hw.sli;
12043 ocs_dma_t dma;
12044 uint8_t *payload = NULL;
12045
12046 int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12047
12048 /* allocate memory for the service parameters */
12049 if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12050 ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12051 return 1;
12052 }
12053
12054 if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12055 &dma, indicator)) {
		ocs_log_err(ocs, "READ_SPARM64 format failure\n");
12057 ocs_dma_free(ocs, &dma);
12058 return 1;
12059 }
12060
12061 if (sli_bmbx_command(sli4)) {
12062 ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12063 ocs_dma_free(ocs, &dma);
12064 return 1;
12065 }
12066
12067 payload = dma.virt;
12068 ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12069 ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12070 ocs_dma_free(ocs, &dma);
12071 return 0;
12072 }
12073
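/**
 * @brief Return the configured persistent topology.
 *
 * @par Description
 * If persistent topology is enabled, map the SLI persistent topology setting
 * to the corresponding OCS_HW_TOPOLOGY_* value; otherwise return
 * OCS_HW_TOPOLOGY_AUTO.
 *
 * @param hw Hardware context.
 *
 * @return Returns an OCS_HW_TOPOLOGY_* value.
 */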
12074 uint32_t
12075 ocs_hw_get_config_persistent_topology(ocs_hw_t *hw)
12076 {
12077 uint32_t topology = OCS_HW_TOPOLOGY_AUTO;
12078 sli4_t *sli = &hw->sli;
12079
12080 if (!sli_persist_topology_enabled(sli))
12081 return topology;
12082
12083 switch (sli->config.pt) {
12084 case SLI4_INIT_LINK_F_P2P_ONLY:
12085 topology = OCS_HW_TOPOLOGY_NPORT;
12086 break;
12087 case SLI4_INIT_LINK_F_FCAL_ONLY:
12088 topology = OCS_HW_TOPOLOGY_LOOP;
12089 break;
12090 default:
12091 break;
12092 }
12093
12094 return topology;
12095 }
12096
/**
12098 * @brief Persistent topology configuration callback argument.
12099 */
12100 typedef struct ocs_hw_persistent_topo_cb_arg {
12101 ocs_sem_t semaphore;
12102 int32_t status;
12103 } ocs_hw_persistent_topo_cb_arg_t;
12104
/**
 * @brief Called on completion of the set persistent topology request.
 *
 * @par Description
 * This is the callback for ocs_hw_set_persistent_topology(); it is invoked
 * when the COMMON_SET_FEATURES mailbox command completes.
12112 *
12113 * @param hw Hardware context.
12114 * @param status The status from the MQE.
12115 * @param mqe Pointer to mailbox command buffer.
12116 * @param arg Pointer to a callback argument.
12117 *
12118 * @return 0 on success, non-zero otherwise
12119 */
12120 static int32_t
ocs_hw_set_persistent_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
12122 {
12123 ocs_hw_persistent_topo_cb_arg_t *req = (ocs_hw_persistent_topo_cb_arg_t *)arg;
12124
12125 req->status = status;
12126
12127 ocs_sem_v(&req->semaphore);
12128
12129 return 0;
12130 }
12131
12132 /**
 * @brief Set persistent topology
 *
 * @par Description
 * Sets the persistent topology (PT) feature using the COMMON_SET_FEATURES
 * mailbox command. If the command succeeds, the topology is recorded in the
 * SLI config. The PT value is later placed in the link_flags field of the
 * INIT_LINK command used to bring up the link.
 *
 * The SLI specification defines the following PT values:
 *
 * When TF (topology failover) is set to 0:
 * 0 Reserved
 * 1 Attempt point-to-point initialization (direct attach or Fabric topology).
 * 2 Attempt FC-AL loop initialization.
 * 3 Reserved
 *
 * When TF is set to 1:
 * 0 Attempt FC-AL loop initialization; if it fails, attempt point-to-point initialization.
 * 1 Attempt point-to-point initialization; if it fails, attempt FC-AL loop initialization.
 * 2 Reserved
 * 3 Reserved
 *
 * @b Note: Topology failover is only available on Lancer G5. This command
 * fails if TF is set to 1 on any other ASIC.
 *
 * @param hw Pointer to HW object.
 * @param topology Topology value to be set, provided through the
 * elxsdkutil set-topology command.
 * @param opts OCS_CMD_POLL or OCS_CMD_NOWAIT.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
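 *
 * @par Example
 * @code
 * // force point-to-point persistently, polling for completion
 * ocs_hw_set_persistent_topology(hw, OCS_HW_TOPOLOGY_NPORT, OCS_CMD_POLL);
 * @endcode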
12161 */
12162 ocs_hw_rtn_e
12163 ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts)
12164 {
12165 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
12166 uint8_t buf[SLI4_BMBX_SIZE];
12167 sli4_req_common_set_features_persistent_topo_param_t param;
12168 ocs_hw_persistent_topo_cb_arg_t request;
12169
	ocs_memset(&param, 0, sizeof(param));
12171 param.persistent_topo = topology;
12172
12173 switch (topology) {
12174 case OCS_HW_TOPOLOGY_AUTO:
12175 if (sli_get_asic_type(&hw->sli) == SLI4_ASIC_TYPE_LANCER) {
12176 param.persistent_topo = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
12177 param.topo_failover = 1;
12178 } else {
			param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
12180 param.topo_failover = 0;
12181 }
12182 break;
12183
12184 case OCS_HW_TOPOLOGY_NPORT:
12185 param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
12186 param.topo_failover = 0;
12187 break;
12188
12189 case OCS_HW_TOPOLOGY_LOOP:
12190 param.persistent_topo = SLI4_INIT_LINK_F_FCAL_ONLY;
12191 param.topo_failover = 0;
12192 break;
12193
12194 default:
12195 ocs_log_err(hw->os, "unsupported topology %#x\n", topology);
12196 return -1;
12197 }
12198
12199 ocs_sem_init(&request.semaphore, 0, "set_persistent_topo");
12200
12201 /* build the set_features command */
12202 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
		SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, sizeof(param), &param);
12204
12205 if (opts == OCS_CMD_POLL) {
12206 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
12207 if (rc) {
12208 ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
12209 return rc;
12210 }
	} else {
		/* there's no response for this feature command */
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_set_persistent_topology_cb, &request);
12215 if (rc) {
12216 ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
12217 return rc;
12218 }
12219
12220 if (ocs_sem_p(&request.semaphore, OCS_SEM_FOREVER)) {
12221 ocs_log_err(hw->os, "ocs_sem_p failed\n");
12222 return -ENXIO;
12223 }
12224
12225 if (request.status) {
12226 ocs_log_err(hw->os, "set persistent topology failed; status: %d\n", request.status);
12227 return -EFAULT;
12228 }
12229 }
12230
	sli_config_persistent_topology(&hw->sli, &param);
12232
12233 return rc;
12234 }
12235
12236 /**
12237 * @page fc_hw_api_overview HW APIs
12238 * - @ref devInitShutdown
12239 * - @ref domain
12240 * - @ref port
12241 * - @ref node
12242 * - @ref io
12243 * - @ref interrupt
12244 *
12245 * <div class="overview">
12246 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12247 * message details, but the higher level code must still manage domains, ports,
12248 * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12249 * these objects.<br><br>
12250 *
12251 * The HW uses function callbacks to notify the higher-level code of events
12252 * that are received from the chip. There are currently three types of
12253 * functions that may be registered:
12254 *
12255 * <ul><li>domain – This function is called whenever a domain event is generated
 * within the HW. Examples include the discovery of a new FCF, the disruption
 * of a connection to a domain, and allocation callbacks.</li>
12258 * <li>unsolicited – This function is called whenever new data is received in
12259 * the SLI-4 receive queue.</li>
12260 * <li>rnode – This function is called for remote node events, such as attach status
12261 * and allocation callbacks.</li></ul>
12262 *
12263 * Upper layer functions may be registered by using the ocs_hw_callback() function.
12264 *
12265 * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12266 * <h2>FC/FCoE HW API</h2>
12267 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12268 * interface for creating the necessary common objects and sending I/Os. It may be used
12269 * “as is” in customer implementations or it can serve as an example of typical interactions
12270 * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12271 *
12272 * <ul><li>Setting-up and tearing-down of the HW.</li>
12273 * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12274 * <li>Sending and receiving I/Os.</li></ul>
12275 *
12276 * <h3>HW Setup</h3>
12277 * To set up the HW:
12278 *
12279 * <ol>
12280 * <li>Set up the HW object using ocs_hw_setup().<br>
12281 * This step performs a basic configuration of the SLI-4 component and the HW to
12282 * enable querying the hardware for its capabilities. At this stage, the HW is not
12283 * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12284 * <li>Configure the HW according to the driver requirements.<br>
 * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
 * well as to configure the resources required (ocs_hw_set()). The driver
12287 * must also register callback functions (ocs_hw_callback()) to receive notification of
12288 * various asynchronous events.<br><br>
12289 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12290 * step creates the underlying queues, commits resources to the hardware, and
 * prepares the hardware for operation. At this point the hardware is operational,
 * but the port is not online and cannot send or receive data.</li><br><br>
12294 * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12295 * When the link comes up, the HW determines if a domain is present and notifies the
12296 * driver using the domain callback function. This is the starting point of the driver's
12297 * interaction with the common objects.<br><br>
12298 * @b Note: For FCoE, there may be more than one domain available and, therefore,
12299 * more than one callback.</li>
12300 * </ol>
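 *
 * A minimal sketch of these steps (error handling omitted and argument lists
 * abbreviated; see the prototypes in ocs_hw.h for the exact signatures):
 *
 * @code
 * ocs_hw_setup(&hw, os, SLI4_PORT_TYPE_FC);
 * ocs_hw_set(&hw, OCS_HW_N_IO, n_io);                      // size resources
 * ocs_hw_callback(&hw, OCS_HW_CB_DOMAIN, domain_cb, arg);  // register callbacks
 * ocs_hw_init(&hw);
 * ocs_hw_port_control(&hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
 * @endcode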
12301 *
12302 * <h3>Allocating and Using Common Objects</h3>
12303 * Common objects provide a mechanism through which the various OneCore Storage
12304 * driver components share and track information. These data structures are primarily
12305 * used to track SLI component information but can be extended by other components, if
12306 * needed. The main objects are:
12307 *
12308 * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12309 * memory access (DMA) transactions.</li>
12310 * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12311 * any infrastructure devices such as FC switches and FC forwarders. The domain
12312 * object contains both an FCFI and a VFI.</li>
12313 * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12314 * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12315 * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12316 * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12317 *
12318 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12319 * node common objects and establish the connections between them. The goal is to
12320 * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12321 * common object connections are shown in the following figure, FC Driver Common Objects:
12322 * <img src="elx_fc_common_objects.jpg"
12323 * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12324 *
12325 * The first step is to create a connection to the domain by allocating an SLI Port object.
12326 * The SLI Port object represents a particular FC ID and must be initialized with one. With
12327 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12328 * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12329 * port object.<br><br>
12330 *
12331 * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12332 * FLOGI) with the domain before attaching.<br><br>
12333 *
12334 * Once attached to the domain, the driver can discover and attach to other devices
12335 * (remote nodes). The exact discovery method depends on the driver, but it typically
12336 * includes using a position map, querying the fabric name server, or an out-of-band
12337 * method. In most cases, it is necessary to log in with devices before performing I/Os.
12338 * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12339 * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12340 * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12341 * before exchanging FCP I/O.<br><br>
12342 *
12343 * @b Note: The HW manages both the well known fabric address and the name server as
12344 * nodes in the domain. Therefore, the driver must allocate node objects prior to
12345 * communicating with either of these entities.
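 *
 * A sketch of the allocate/attach flow described above (argument lists
 * abbreviated; the service-parameter DMA buffer comes from the login exchange):
 *
 * @code
 * ocs_hw_node_alloc(&hw, &rnode, fc_id, sport);
 * // ... PLOGI exchange via ocs_hw_srrs_send() ...
 * ocs_hw_node_attach(&hw, &rnode, &sparms_dma);
 * @endcode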
12346 *
12347 * <h3>Sending and Receiving I/Os</h3>
 * The HW provides separate interfaces for sending BLS/ELS/FC-CT and FCP, but the
12349 * commands are conceptually similar. Since the commands complete asynchronously,
12350 * the caller must provide a HW I/O object that maintains the I/O state, as well as
12351 * provide a callback function. The driver may use the same callback function for all I/O
12352 * operations, but each operation must use a unique HW I/O object. In the SLI-4
12353 * architecture, there is a direct association between the HW I/O object and the SGL used
12354 * to describe the data. Therefore, a driver typically performs the following operations:
12355 *
12356 * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12357 * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12358 * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12359 * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
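 *
 * For example (sketch; "..." marks abbreviated argument lists):
 *
 * @code
 * ocs_hw_io_t *io = ocs_hw_io_alloc(&hw);
 * ocs_hw_io_init_sges(&hw, io, ...);       // initialize the SGL for this I/O
 * ocs_hw_io_add_sge(&hw, io, addr, len);   // one call per data buffer
 * ocs_hw_io_send(&hw, type, io, ...);      // completes via the supplied callback
 * @endcode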
12360 *
12361 * <h3>HW Tear Down</h3>
12362 * To tear-down the HW:
12363 *
12364 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
 * data and events.</li>
12366 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12367 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12368 * <br>
12369 * </div><!-- overview -->
12370 *
12371 */
12372
12373 /**
12374 * This contains all hw runtime workaround code. Based on the asic type,
12375 * asic revision, and range of fw revisions, a particular workaround may be enabled.
12376 *
12377 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12378 * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12379 * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12380 * control specific runtime behavior.
12381 *
 * The controls in ocs_hw_workaround_t are intended to be defined functionally, so
 * driver code reads "if (hw->workaround.enable_xxx) then ...", rather than
 * "if this is a BE3, then do xxx".
12385 *
12386 */
12387
12388 #define HW_FWREV_ZERO (0ull)
12389 #define HW_FWREV_MAX (~0ull)
12390
12391 #define SLI4_ASIC_TYPE_ANY 0
12392 #define SLI4_ASIC_REV_ANY 0
12393
12394 /**
12395 * @brief Internal definition of workarounds
12396 */
12397
12398 typedef enum {
12399 HW_WORKAROUND_TEST = 1,
12400 HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */
12401 HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */
12402 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12403 HW_WORKAROUND_WQE_COUNT_METHOD,
12404 HW_WORKAROUND_RQE_COUNT_METHOD,
12405 HW_WORKAROUND_USE_UNREGISTERD_RPI,
12406 HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */
12407 HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12408 HW_WORKAROUND_USE_DIF_QUARANTINE,
12409 HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */
12410 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */
12411 HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */
12412 HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capability if FW version is too old */
12414 } hw_workaround_e;

/**
 * @brief Internal workaround structure instance
 */

typedef struct {
	sli4_asic_type_e asic_type;
	sli4_asic_rev_e asic_rev;
	uint64_t fwrev_low;
	uint64_t fwrev_high;

	hw_workaround_e workaround;
	uint32_t value;
} hw_workaround_t;
static hw_workaround_t hw_workarounds[] = {
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_TEST, 999},

	/* Bug: 127585: if_type == 2 returns 0 for the total length placed on
	 * FCP_TSEND64_WQE completions. Note: the original driver code enables this
	 * workaround for all ASIC types.
	 */
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},

	/* Bug: unknown, Lancer A0 has a mis-reported max queue depth */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_MAX_QUEUE, 2048},

	/* Bug: 143399, BE3 has a mis-reported max RQ queue depth */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
		HW_WORKAROUND_MAX_RQ, 2048},

	/* Bug: 143399, Skyhawk has a mis-reported max RQ queue depth */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
		HW_WORKAROUND_MAX_RQ, 2048},

	/* Bug: 103487, BE3 before f/w 4.2.314.0 has a mis-reported WQE count method */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
		HW_WORKAROUND_WQE_COUNT_METHOD, 1},

	/* Bug: 103487, BE3 before f/w 4.2.314.0 has a mis-reported RQE count method */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
		HW_WORKAROUND_RQE_COUNT_METHOD, 1},

	/* Bug: 142968, BE3 UE with RPI == 0xffff */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_UNREGISTERED_RPI, 0},

	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},

	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},

	/* Bug: 160124, Skyhawk quarantine DIF XRIs */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},

	/* Bug: 161832, Skyhawk uses a secondary XRI for multiple data phase TRECV */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},

	/* Bug: xxxxxx, FCFI reported in SRB not correct */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
#if 0
	/* Bug: 165642, FW version check for driver */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
#endif
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},

	/* Bug 177061, Lancer FW does not set the SGLC bit */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_SGLC_MISREPORTED, 0},

	/* BZ 181208/183914, enable this workaround for ALL revisions */
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
};

/**
 * @brief Function prototypes
 */

static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);

/**
 * @brief Parse the firmware version (name)
 *
 * Parse a string of the form "a.b.c.d", returning a uint64_t packed as defined
 * by the HW_FWREV() macro.
 *
 * @param fwrev_string Pointer to the firmware revision string.
 *
 * @return The packed firmware revision value.
 */

static uint64_t
parse_fw_version(const char *fwrev_string)
{
	int v[4] = {0};
	const char *p;
	int i;

	for (p = fwrev_string, i = 0; *p && (i < 4); i++) {
		v[i] = ocs_strtoul(p, 0, 0);
		while (*p && *p != '.') {
			p++;
		}
		if (*p) {
			p++;
		}
	}

	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
	if (v[2] == 9999) {
		return HW_FWREV_MAX;
	} else {
		return HW_FWREV(v[0], v[1], v[2], v[3]);
	}
}
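
/*
 * For example, assuming HW_FWREV() packs each component into 16 bits from the
 * most significant word down, parse_fw_version("11.2.156.27") yields the same
 * value as HW_FWREV(11, 2, 156, 27), and the bootleg revision string
 * "0.0.9999.0" yields HW_FWREV_MAX.
 */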

/**
 * @brief Test for a workaround match
 *
 * Looks at the ASIC type, ASIC revision, and FW revision, and returns TRUE if
 * the entry matches.
 *
 * @param hw Pointer to the HW structure
 * @param w Pointer to a workaround structure entry
 *
 * @return Return TRUE for a match
 */

static int32_t
ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
{
	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
	    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
	    (w->fwrev_low <= hw->workaround.fwrev) &&
	    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
}
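
/*
 * For example, the BE3 MAX_RQ entry above spans the half-open range
 * [HW_FWREV_ZERO, HW_FWREV(4,6,293,0)): firmware 4.6.292.0 matches, while
 * 4.6.293.0 does not. When fwrev_high is HW_FWREV_MAX, the range is unbounded
 * above, so any firmware at or above fwrev_low matches.
 */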

/**
 * @brief Set up HW runtime workarounds
 *
 * This function is called at the end of ocs_hw_setup() to set up any runtime
 * workarounds based on the HW/SLI setup.
 *
 * @param hw Pointer to the HW structure
 *
 * @return none
 */

void
ocs_hw_workaround_setup(struct ocs_hw_s *hw)
{
	hw_workaround_t *w;
	sli4_t *sli4 = &hw->sli;
	uint32_t i;

	/* Initialize the workaround settings */
	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));

	/* If hw_war_version is non-null, then it's a value that was set by a module parameter
	 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
	 */

	if (hw->hw_war_version) {
		hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
	} else {
		hw->workaround.fwrev = parse_fw_version((char *)sli4->config.fw_name[0]);
	}

	/* Walk the workaround list; if a match is found, then handle it */
	for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
		if (ocs_hw_workaround_match(hw, w)) {
			switch (w->workaround) {
			case HW_WORKAROUND_TEST: {
				ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
				break;
			}

			case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
				ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
				hw->workaround.retain_tsend_io_length = 1;
				break;
			}
			case HW_WORKAROUND_MAX_QUEUE: {
				sli4_qtype_e q;

				ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
				for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
					if (hw->num_qentries[q] > w->value) {
						hw->num_qentries[q] = w->value;
					}
				}
				break;
			}
			case HW_WORKAROUND_MAX_RQ: {
				ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
				if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
					hw->num_qentries[SLI_QTYPE_RQ] = w->value;
				}
				break;
			}
			case HW_WORKAROUND_WQE_COUNT_METHOD: {
				ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
				sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
				sli_calc_max_qentries(sli4);
				break;
			}
			case HW_WORKAROUND_RQE_COUNT_METHOD: {
				ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
				sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
				sli_calc_max_qentries(sli4);
				break;
			}
			case HW_WORKAROUND_USE_UNREGISTERED_RPI:
				ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
				hw->workaround.use_unregistered_rpi = TRUE;
				/*
				 * Allocate an RPI that is never registered, to be used in the case where
				 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
				 */
				if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
					&hw->workaround.unregistered_index)) {
					ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
					hw->workaround.use_unregistered_rpi = FALSE;
				}
				break;
			case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
				ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
				hw->workaround.disable_ar_tgt_dif = TRUE;
				break;
			case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
				ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
				hw->workaround.disable_dump_loc = TRUE;
				break;
			case HW_WORKAROUND_USE_DIF_QUARANTINE:
				ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
				hw->workaround.use_dif_quarantine = TRUE;
				break;
			case HW_WORKAROUND_USE_DIF_SEC_XRI:
				ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
				hw->workaround.use_dif_sec_xri = TRUE;
				break;
			case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
				ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
				hw->workaround.override_fcfi = TRUE;
				break;

			case HW_WORKAROUND_FW_VERSION_TOO_LOW:
				ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
				hw->workaround.fw_version_too_low = TRUE;
				break;
			case HW_WORKAROUND_SGLC_MISREPORTED:
				ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
				hw->workaround.sglc_misreported = TRUE;
				break;
			case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
				ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
				hw->workaround.ignore_send_frame = TRUE;
				break;
			} /* switch(w->workaround) */
		}
	}
}