1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34 /**
35 * @file
36 * Defines and implements the Hardware Abstraction Layer (HW).
37 * All interaction with the hardware is performed through the HW, which abstracts
38 * the details of the underlying SLI-4 implementation.
39 */
40
41 /**
42 * @defgroup devInitShutdown Device Initialization and Shutdown
43 * @defgroup domain Domain Functions
44 * @defgroup port Port Functions
45 * @defgroup node Remote Node Functions
46 * @defgroup io IO Functions
47 * @defgroup interrupt Interrupt handling
48 * @defgroup os OS Required Functions
49 */
50
51 #include "ocs.h"
52 #include "ocs_os.h"
53 #include "ocs_hw.h"
54 #include "ocs_hw_queues.h"
55
56 #define OCS_HW_MQ_DEPTH 128
57 #define OCS_HW_READ_FCF_SIZE 4096
58 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256
59 #define OCS_HW_WQ_TIMER_PERIOD_MS 500
60
61 /* values used for setting the auto xfer rdy parameters */
62 #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */
63 #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE
64 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE
65 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0
66 #define OCS_HW_REQUE_XRI_REGTAG 65534
67 /* max command and response buffer lengths -- arbitrary at the moment */
68 #define OCS_HW_DMTF_CLP_CMD_MAX 256
69 #define OCS_HW_DMTF_CLP_RSP_MAX 256
70
71 /* HW global data */
72 ocs_hw_global_t hw_global;
73
74 static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
75 static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
76 static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
77 static int32_t ocs_hw_cb_link(void *, void *);
78 static int32_t ocs_hw_cb_fip(void *, void *);
79 static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
80 static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
81 static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
82 static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
83 static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
84 static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
85 static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
86 static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
87 static int32_t ocs_hw_flush(ocs_hw_t *);
88 static int32_t ocs_hw_command_cancel(ocs_hw_t *);
89 static int32_t ocs_hw_io_cancel(ocs_hw_t *);
90 static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
91 static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
92 static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
93 static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
94 static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
95 static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
96 static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
97 static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
98 static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
99 static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
100 static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
101 typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
102 static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
103 static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
104
105 static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
106 static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
107 static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
108 static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
109 static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
110 static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
111 static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
112 static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
113 static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
114 static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
115 static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
116 static void ocs_hw_io_free_internal(void *arg);
117 static void ocs_hw_io_free_port_owned(void *arg);
118 static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
119 static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
120 static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
121 static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
122 static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
123 static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
124
125 /* HW domain database operations */
126 static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
127 static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
128
129 /* Port state machine */
130 static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
131 static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
132 static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
133 static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
134 static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
135
136 /* Domain state machine */
137 static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
138 static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
139 static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
140 static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
141 static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
142 static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
143 static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
144 static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
145
146 /* BZ 161832 */
147 static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
148
149 /* WQE timeouts */
150 static void target_wqe_timer_cb(void *arg);
151 static void shutdown_target_wqe_timer(ocs_hw_t *hw);
152
153 static inline void
154 ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
155 {
156 if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
157 /*
158 * Active WQE list currently only used for
159 * target WQE timeouts.
160 */
161 ocs_lock(&hw->io_lock);
162 ocs_list_add_tail(&hw->io_timed_wqe, io);
163 io->submit_ticks = ocs_get_os_ticks();
164 ocs_unlock(&hw->io_lock);
165 }
166 }
167
168 static inline void
169 ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
170 {
171 if (hw->config.emulate_tgt_wqe_timeout) {
172 /*
173 * If target wqe timeouts are enabled,
174 * remove from active wqe list.
175 */
176 ocs_lock(&hw->io_lock);
177 if (ocs_list_on_list(&io->wqe_link)) {
178 ocs_list_remove(&hw->io_timed_wqe, io);
179 }
180 ocs_unlock(&hw->io_lock);
181 }
182 }
183
184 static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
185 {
186 switch (io_type) {
187 case OCS_HW_IO_INITIATOR_READ:
188 case OCS_HW_IO_INITIATOR_WRITE:
189 case OCS_HW_IO_INITIATOR_NODATA:
190 case OCS_HW_FC_CT:
191 case OCS_HW_ELS_REQ:
192 return 1;
193 default:
194 return 0;
195 }
196 }
197
198 static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
199 {
200 /* if exchange not active, nothing to abort */
201 if (!xb) {
202 return FALSE;
203 }
204 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
205 switch (ext) {
206 /* exceptions where abort is not needed */
207 case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
208 case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
209 return FALSE;
210 default:
211 break;
212 }
213 }
214 return TRUE;
215 }
216
217 /**
218 * @brief Determine the number of chutes on the device.
219 *
220 * @par Description
221 * Some devices require queue resources allocated per protocol processor
222 * (chute). This function returns the number of chutes on this device.
223 *
224 * @param hw Hardware context allocated by the caller.
225 *
226 * @return Returns the number of chutes on the device for protocol.
227 */
228 static uint32_t
229 ocs_hw_get_num_chutes(ocs_hw_t *hw)
230 {
231 uint32_t num_chutes = 1;
232
233 if (sli_get_is_dual_ulp_capable(&hw->sli) &&
234 sli_get_is_ulp_enabled(&hw->sli, 0) &&
235 sli_get_is_ulp_enabled(&hw->sli, 1)) {
236 num_chutes = 2;
237 }
238 return num_chutes;
239 }
240
241 static ocs_hw_rtn_e
242 ocs_hw_link_event_init(ocs_hw_t *hw)
243 {
244 ocs_hw_assert(hw);
245
246 hw->link.status = SLI_LINK_STATUS_MAX;
247 hw->link.topology = SLI_LINK_TOPO_NONE;
248 hw->link.medium = SLI_LINK_MEDIUM_MAX;
249 hw->link.speed = 0;
250 hw->link.loop_map = NULL;
251 hw->link.fc_id = UINT32_MAX;
252
253 return OCS_HW_RTN_SUCCESS;
254 }
255
256 /**
257 * @ingroup devInitShutdown
258 * @brief If this is physical port 0, then read the max dump size.
259 *
260 * @par Description
261 * Queries the FW for the maximum dump size
262 *
263 * @param hw Hardware context allocated by the caller.
264 *
265 * @return Returns 0 on success, or a non-zero value on failure.
266 */
267 static ocs_hw_rtn_e
268 ocs_hw_read_max_dump_size(ocs_hw_t *hw)
269 {
270 uint8_t buf[SLI4_BMBX_SIZE];
271 uint8_t bus, dev, func;
272 int rc;
273
274 /* lancer only */
275 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
276 ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
277 return OCS_HW_RTN_ERROR;
278 }
279
280 /*
281 * Make sure the FW is new enough to support this command. If the FW
282 * is too old, the FW will UE.
283 */
284 if (hw->workaround.disable_dump_loc) {
285 ocs_log_test(hw->os, "FW version is too old for this feature\n");
286 return OCS_HW_RTN_ERROR;
287 }
288
289 /* attempt to detemine the dump size for function 0 only. */
290 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
291 if (func == 0) {
292 if (sli_cmd_common_set_dump_location(&hw->sli, buf,
293 SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
294 sli4_res_common_set_dump_location_t *rsp =
295 (sli4_res_common_set_dump_location_t *)
296 (buf + offsetof(sli4_cmd_sli_config_t,
297 payload.embed));
298
299 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
300 if (rc != OCS_HW_RTN_SUCCESS) {
301 ocs_log_test(hw->os, "set dump location command failed\n");
302 return rc;
303 } else {
304 hw->dump_size = rsp->buffer_length;
305 ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
306 }
307 }
308 }
309 return OCS_HW_RTN_SUCCESS;
310 }
311
312 /**
313 * @ingroup devInitShutdown
314 * @brief Set up the Hardware Abstraction Layer module.
315 *
316 * @par Description
317 * Calls set up to configure the hardware.
318 *
319 * @param hw Hardware context allocated by the caller.
320 * @param os Device abstraction.
321 * @param port_type Protocol type of port, such as FC and NIC.
322 *
323 * @todo Why is port_type a parameter?
324 *
325 * @return Returns 0 on success, or a non-zero value on failure.
326 */
327 ocs_hw_rtn_e
328 ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
329 {
330 uint32_t i;
331 char prop_buf[32];
332
333 if (hw == NULL) {
334 ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
335 return OCS_HW_RTN_ERROR;
336 }
337
338 if (hw->hw_setup_called) {
339 /* Setup run-time workarounds.
340 * Call for each setup, to allow for hw_war_version
341 */
342 ocs_hw_workaround_setup(hw);
343 return OCS_HW_RTN_SUCCESS;
344 }
345
346 /*
347 * ocs_hw_init() relies on NULL pointers indicating that a structure
348 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
349 * free/realloc that memory
350 */
351 ocs_memset(hw, 0, sizeof(ocs_hw_t));
352
353 hw->hw_setup_called = TRUE;
354
355 hw->os = os;
356
357 ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
358 ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
359 ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
360 hw->cmd_head_count = 0;
361
362 ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
363 ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
364
365 ocs_atomic_init(&hw->io_alloc_failed_count, 0);
366
367 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
368 hw->config.dif_seed = 0;
369 hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
370 hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
371 hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
372 hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
373
374 if (sli_setup(&hw->sli, hw->os, port_type)) {
375 ocs_log_err(hw->os, "SLI setup failed\n");
376 return OCS_HW_RTN_ERROR;
377 }
378
379 ocs_memset(hw->domains, 0, sizeof(hw->domains));
380
381 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
382
383 ocs_hw_link_event_init(hw);
384
385 sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
386 sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
387
388 /*
389 * Set all the queue sizes to the maximum allowed. These values may
390 * be changes later by the adjust and workaround functions.
391 */
392 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
393 hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
394 }
395
396 /*
397 * The RQ assignment for RQ pair mode.
398 */
399 hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
400 hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
401 if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
402 hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
403 }
404
405 /* by default, enable initiator-only auto-ABTS emulation */
406 hw->config.i_only_aab = TRUE;
407
408 /* Setup run-time workarounds */
409 ocs_hw_workaround_setup(hw);
410
411 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
412 if (hw->workaround.override_fcfi) {
413 hw->first_domain_idx = -1;
414 }
415
416 /* Must be done after the workaround setup */
417 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
418 (void)ocs_hw_read_max_dump_size(hw);
419 }
420
421 /* calculate the number of WQs required. */
422 ocs_hw_adjust_wqs(hw);
423
424 /* Set the default dif mode */
425 if (! sli_is_dif_inline_capable(&hw->sli)) {
426 ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
427 hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
428 }
429 /* Workaround: BZ 161832 */
430 if (hw->workaround.use_dif_sec_xri) {
431 ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
432 }
433
434 /*
435 * Figure out the starting and max ULP to spread the WQs across the
436 * ULPs.
437 */
438 if (sli_get_is_dual_ulp_capable(&hw->sli)) {
439 if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
440 sli_get_is_ulp_enabled(&hw->sli, 1)) {
441 hw->ulp_start = 0;
442 hw->ulp_max = 1;
443 } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
444 hw->ulp_start = 0;
445 hw->ulp_max = 0;
446 } else {
447 hw->ulp_start = 1;
448 hw->ulp_max = 1;
449 }
450 } else {
451 if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
452 hw->ulp_start = 0;
453 hw->ulp_max = 0;
454 } else {
455 hw->ulp_start = 1;
456 hw->ulp_max = 1;
457 }
458 }
459 ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
460 hw->ulp_start, hw->ulp_max);
461 hw->config.queue_topology = hw_global.queue_topology_string;
462
463 hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
464
465 hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
466 hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
467 hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
468 hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
469 hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
470
471 /* Verify qtop configuration against driver supported configuration */
472 if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
473 ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
474 OCE_HW_MAX_NUM_MRQ_PAIRS);
475 return OCS_HW_RTN_ERROR;
476 }
477
478 if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
479 ocs_log_crit(hw->os, "Max supported EQs = %d\n",
480 OCS_HW_MAX_NUM_EQ);
481 return OCS_HW_RTN_ERROR;
482 }
483
484 if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
485 ocs_log_crit(hw->os, "Max supported CQs = %d\n",
486 OCS_HW_MAX_NUM_CQ);
487 return OCS_HW_RTN_ERROR;
488 }
489
490 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
491 ocs_log_crit(hw->os, "Max supported WQs = %d\n",
492 OCS_HW_MAX_NUM_WQ);
493 return OCS_HW_RTN_ERROR;
494 }
495
496 if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
497 ocs_log_crit(hw->os, "Max supported MQs = %d\n",
498 OCS_HW_MAX_NUM_MQ);
499 return OCS_HW_RTN_ERROR;
500 }
501
502 return OCS_HW_RTN_SUCCESS;
503 }
504
505 /**
506 * @ingroup devInitShutdown
507 * @brief Allocate memory structures to prepare for the device operation.
508 *
509 * @par Description
510 * Allocates memory structures needed by the device and prepares the device
511 * for operation.
512 * @n @n @b Note: This function may be called more than once (for example, at
513 * initialization and then after a reset), but the size of the internal resources
514 * may not be changed without tearing down the HW (ocs_hw_teardown()).
515 *
516 * @param hw Hardware context allocated by the caller.
517 *
518 * @return Returns 0 on success, or a non-zero value on failure.
519 */
520 ocs_hw_rtn_e
521 ocs_hw_init(ocs_hw_t *hw)
522 {
523 ocs_hw_rtn_e rc;
524 uint32_t i = 0;
525 uint8_t buf[SLI4_BMBX_SIZE];
526 uint32_t max_rpi;
527 int rem_count;
528 int written_size = 0;
529 uint32_t count;
530 char prop_buf[32];
531 uint32_t ramdisc_blocksize = 512;
532 uint32_t q_count = 0;
533 /*
534 * Make sure the command lists are empty. If this is start-of-day,
535 * they'll be empty since they were just initialized in ocs_hw_setup.
536 * If we've just gone through a reset, the command and command pending
537 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
538 */
539 ocs_lock(&hw->cmd_lock);
540 if (!ocs_list_empty(&hw->cmd_head)) {
541 ocs_log_test(hw->os, "command found on cmd list\n");
542 ocs_unlock(&hw->cmd_lock);
543 return OCS_HW_RTN_ERROR;
544 }
545 if (!ocs_list_empty(&hw->cmd_pending)) {
546 ocs_log_test(hw->os, "command found on pending list\n");
547 ocs_unlock(&hw->cmd_lock);
548 return OCS_HW_RTN_ERROR;
549 }
550 ocs_unlock(&hw->cmd_lock);
551
552 /* Free RQ buffers if prevously allocated */
553 ocs_hw_rx_free(hw);
554
555 /*
556 * The IO queues must be initialized here for the reset case. The
557 * ocs_hw_init_io() function will re-add the IOs to the free list.
558 * The cmd_head list should be OK since we free all entries in
559 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
560 */
561
562 /* If we are in this function due to a reset, there may be stale items
563 * on lists that need to be removed. Clean them up.
564 */
565 rem_count=0;
566 if (ocs_list_valid(&hw->io_wait_free)) {
567 while ((!ocs_list_empty(&hw->io_wait_free))) {
568 rem_count++;
569 ocs_list_remove_head(&hw->io_wait_free);
570 }
571 if (rem_count > 0) {
572 ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
573 }
574 }
575 rem_count=0;
576 if (ocs_list_valid(&hw->io_inuse)) {
577 while ((!ocs_list_empty(&hw->io_inuse))) {
578 rem_count++;
579 ocs_list_remove_head(&hw->io_inuse);
580 }
581 if (rem_count > 0) {
582 ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
583 }
584 }
585 rem_count=0;
586 if (ocs_list_valid(&hw->io_free)) {
587 while ((!ocs_list_empty(&hw->io_free))) {
588 rem_count++;
589 ocs_list_remove_head(&hw->io_free);
590 }
591 if (rem_count > 0) {
592 ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
593 }
594 }
595 if (ocs_list_valid(&hw->io_port_owned)) {
596 while ((!ocs_list_empty(&hw->io_port_owned))) {
597 ocs_list_remove_head(&hw->io_port_owned);
598 }
599 }
600 ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
601 ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
602 ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
603 ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
604 ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
605 ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
606
607 /* If MRQ not required, Make sure we dont request feature. */
608 if (hw->config.n_rq == 1) {
609 hw->sli.config.features.flag.mrqp = FALSE;
610 }
611
612 if (sli_init(&hw->sli)) {
613 ocs_log_err(hw->os, "SLI failed to initialize\n");
614 return OCS_HW_RTN_ERROR;
615 }
616
617 /*
618 * Enable the auto xfer rdy feature if requested.
619 */
620 hw->auto_xfer_rdy_enabled = FALSE;
621 if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
622 hw->config.auto_xfer_rdy_size > 0) {
623 if (hw->config.esoc){
624 if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
625 ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
626 }
627 written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
628 } else {
629 written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
630 }
631 if (written_size) {
632 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
633 if (rc != OCS_HW_RTN_SUCCESS) {
634 ocs_log_err(hw->os, "config auto xfer rdy failed\n");
635 return rc;
636 }
637 }
638 hw->auto_xfer_rdy_enabled = TRUE;
639
640 if (hw->config.auto_xfer_rdy_t10_enable) {
641 rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
642 if (rc != OCS_HW_RTN_SUCCESS) {
643 ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
644 return rc;
645 }
646 }
647 }
648
649 if(hw->sliport_healthcheck) {
650 rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
651 if (rc != OCS_HW_RTN_SUCCESS) {
652 ocs_log_err(hw->os, "Enabling Sliport Health check failed \n");
653 return rc;
654 }
655 }
656
657 /*
658 * Set FDT transfer hint, only works on Lancer
659 */
660 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
661 /*
662 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
663 * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature.
664 */
665 ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
666 }
667
668 /*
669 * Verify that we have not exceeded any queue sizes
670 */
671 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
672 OCS_HW_MAX_NUM_EQ);
673 if (hw->config.n_eq > q_count) {
674 ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
675 hw->config.n_eq, q_count);
676 return OCS_HW_RTN_ERROR;
677 }
678
679 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
680 OCS_HW_MAX_NUM_CQ);
681 if (hw->config.n_cq > q_count) {
682 ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
683 hw->config.n_cq, q_count);
684 return OCS_HW_RTN_ERROR;
685 }
686
687 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
688 OCS_HW_MAX_NUM_MQ);
689 if (hw->config.n_mq > q_count) {
690 ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
691 hw->config.n_mq, q_count);
692 return OCS_HW_RTN_ERROR;
693 }
694
695 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
696 OCS_HW_MAX_NUM_RQ);
697 if (hw->config.n_rq > q_count) {
698 ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
699 hw->config.n_rq, q_count);
700 return OCS_HW_RTN_ERROR;
701 }
702
703 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
704 OCS_HW_MAX_NUM_WQ);
705 if (hw->config.n_wq > q_count) {
706 ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
707 hw->config.n_wq, q_count);
708 return OCS_HW_RTN_ERROR;
709 }
710
711 /* zero the hashes */
712 ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
713 ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
714 OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
715
716 ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
717 ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
718 OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
719
720 ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
721 ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
722 OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
723
724 rc = ocs_hw_init_queues(hw, hw->qtop);
725 if (rc != OCS_HW_RTN_SUCCESS) {
726 return rc;
727 }
728
729 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
730 i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
731 if (i) {
732 ocs_dma_t payload_memory;
733
734 rc = OCS_HW_RTN_ERROR;
735
736 if (hw->rnode_mem.size) {
737 ocs_dma_free(hw->os, &hw->rnode_mem);
738 }
739
740 if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
741 ocs_log_err(hw->os, "remote node memory allocation fail\n");
742 return OCS_HW_RTN_NO_MEMORY;
743 }
744
745 payload_memory.size = 0;
746 if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
747 &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
748 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
749
750 if (payload_memory.size != 0) {
751 /* The command was non-embedded - need to free the dma buffer */
752 ocs_dma_free(hw->os, &payload_memory);
753 }
754 }
755
756 if (rc != OCS_HW_RTN_SUCCESS) {
757 ocs_log_err(hw->os, "header template registration failed\n");
758 return rc;
759 }
760 }
761
762 /* Allocate and post RQ buffers */
763 rc = ocs_hw_rx_allocate(hw);
764 if (rc) {
765 ocs_log_err(hw->os, "rx_allocate failed\n");
766 return rc;
767 }
768
769 /* Populate hw->seq_free_list */
770 if (hw->seq_pool == NULL) {
771 uint32_t count = 0;
772 uint32_t i;
773
774 /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
775 for (i = 0; i < hw->hw_rq_count; i++) {
776 count += hw->hw_rq[i]->entry_count;
777 }
778
779 hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
780 if (hw->seq_pool == NULL) {
781 ocs_log_err(hw->os, "malloc seq_pool failed\n");
782 return OCS_HW_RTN_NO_MEMORY;
783 }
784 }
785
786 if(ocs_hw_rx_post(hw)) {
787 ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
788 }
789
790 /* Allocate rpi_ref if not previously allocated */
791 if (hw->rpi_ref == NULL) {
792 hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
793 OCS_M_ZERO | OCS_M_NOWAIT);
794 if (hw->rpi_ref == NULL) {
795 ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
796 return OCS_HW_RTN_NO_MEMORY;
797 }
798 }
799
800 for (i = 0; i < max_rpi; i ++) {
801 ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
802 ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
803 }
804
805 ocs_memset(hw->domains, 0, sizeof(hw->domains));
806
807 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
808 if (hw->workaround.override_fcfi) {
809 hw->first_domain_idx = -1;
810 }
811
812 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
813
814 /* Register a FCFI to allow unsolicited frames to be routed to the driver */
815 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
816 if (hw->hw_mrq_count) {
817 ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
818
819 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
820 if (rc != OCS_HW_RTN_SUCCESS) {
821 ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
822 return rc;
823 }
824
825 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
826 if (rc != OCS_HW_RTN_SUCCESS) {
827 ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
828 return rc;
829 }
830 } else {
831 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
832
833 ocs_log_debug(hw->os, "using REG_FCFI standard\n");
834
835 /* Set the filter match/mask values from hw's filter_def values */
836 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
837 rq_cfg[i].rq_id = 0xffff;
838 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
839 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
840 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
841 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
842 }
843
844 /*
845 * Update the rq_id's of the FCF configuration (don't update more than the number
846 * of rq_cfg elements)
847 */
848 for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
849 hw_rq_t *rq = hw->hw_rq[i];
850 uint32_t j;
851 for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
852 uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
853 if (mask & (1U << j)) {
854 rq_cfg[j].rq_id = rq->hdr->id;
855 ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
856 j, hw->config.filter_def[j], i, rq->hdr->id);
857 }
858 }
859 }
860
861 rc = OCS_HW_RTN_ERROR;
862
863 if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
864 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
865 }
866
867 if (rc != OCS_HW_RTN_SUCCESS) {
868 ocs_log_err(hw->os, "FCFI registration failed\n");
869 return rc;
870 }
871 hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
872 }
873 }
874
875 /*
876 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
877 * thus the pool allocation size of 64k)
878 */
879 rc = ocs_hw_reqtag_init(hw);
880 if (rc) {
881 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
882 return rc;
883 }
884
885 rc = ocs_hw_setup_io(hw);
886 if (rc) {
887 ocs_log_err(hw->os, "IO allocation failure\n");
888 return rc;
889 }
890
891 rc = ocs_hw_init_io(hw);
892 if (rc) {
893 ocs_log_err(hw->os, "IO initialization failure\n");
894 return rc;
895 }
896
897 ocs_queue_history_init(hw->os, &hw->q_hist);
898
899 /* get hw link config; polling, so callback will be called immediately */
900 hw->linkcfg = OCS_HW_LINKCFG_NA;
901 ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
902
903 /* if lancer ethernet, ethernet ports need to be enabled */
904 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
905 (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
906 if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
907 /* log warning but continue */
908 ocs_log_err(hw->os, "Failed to set ethernet license\n");
909 }
910 }
911
912 /* Set the DIF seed - only for lancer right now */
913 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
914 ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
915 ocs_log_err(hw->os, "Failed to set DIF seed value\n");
916 return rc;
917 }
918
919 /* Set the DIF mode - skyhawk only */
920 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
921 sli_get_dif_capable(&hw->sli)) {
922 rc = ocs_hw_set_dif_mode(hw);
923 if (rc != OCS_HW_RTN_SUCCESS) {
924 ocs_log_err(hw->os, "Failed to set DIF mode value\n");
925 return rc;
926 }
927 }
928
929 /*
930 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
931 */
932 for (i = 0; i < hw->eq_count; i++) {
933 sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
934 }
935
936 /*
937 * Initialize RQ hash
938 */
939 for (i = 0; i < hw->rq_count; i++) {
940 ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
941 }
942
943 /*
944 * Initialize WQ hash
945 */
946 for (i = 0; i < hw->wq_count; i++) {
947 ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
948 }
949
950 /*
951 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
952 */
953 for (i = 0; i < hw->cq_count; i++) {
954 ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
955 sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
956 }
957
958 /* record the fact that the queues are functional */
959 hw->state = OCS_HW_STATE_ACTIVE;
960
961 /* Note: Must be after the IOs are setup and the state is active*/
962 if (ocs_hw_rqpair_init(hw)) {
963 ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
964 }
965
966 /* finally kick off periodic timer to check for timed out target WQEs */
967 if (hw->config.emulate_tgt_wqe_timeout) {
968 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
969 OCS_HW_WQ_TIMER_PERIOD_MS);
970 }
971
972 /*
973 * Allocate a HW IOs for send frame. Allocate one for each Class 1 WQ, or if there
974 * are none of those, allocate one for WQ[0]
975 */
976 if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
977 for (i = 0; i < count; i++) {
978 hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
979 wq->send_frame_io = ocs_hw_io_alloc(hw);
980 if (wq->send_frame_io == NULL) {
981 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
982 }
983 }
984 } else {
985 hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
986 if (hw->hw_wq[0]->send_frame_io == NULL) {
987 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
988 }
989 }
990
991 /* Initialize send frame frame sequence id */
992 ocs_atomic_init(&hw->send_frame_seq_id, 0);
993
994 /* Initialize watchdog timer if enabled by user */
995 hw->expiration_logged = 0;
996 if(hw->watchdog_timeout) {
997 if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
998 ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
999 }else if(!ocs_hw_config_watchdog_timer(hw)) {
1000 ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout);
1001 }
1002 }
1003
1004 if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
1005 ocs_log_err(hw->os, "domain node memory allocation fail\n");
1006 return OCS_HW_RTN_NO_MEMORY;
1007 }
1008
1009 if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
1010 ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
1011 return OCS_HW_RTN_NO_MEMORY;
1012 }
1013
1014 if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
1015 SLI4_MIN_LOOP_MAP_BYTES, 4)) {
1016 ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
1017 }
1018
1019 return OCS_HW_RTN_SUCCESS;
1020 }
1021
1022 /**
1023 * @brief Configure Multi-RQ
1024 *
1025 * @param hw Hardware context allocated by the caller.
1026 * @param mode 1 to set MRQ filters and 0 to set FCFI index
1027 * @param vlanid valid in mode 0
1028 * @param fcf_index valid in mode 0
1029 *
1030 * @return Returns 0 on success, or a non-zero value on failure.
1031 */
1032 static int32_t
1033 ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
1034 {
1035 uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
1036 hw_rq_t *rq;
1037 sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
1038 uint32_t i, j;
1039 sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
1040 int32_t rc;
1041
1042 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1043 goto issue_cmd;
1044 }
1045
1046 /* Set the filter match/mask values from hw's filter_def values */
1047 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1048 rq_filter[i].rq_id = 0xffff;
1049 rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
1050 rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
1051 rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
1052 rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
1053 }
1054
1055 /* Accumulate counts for each filter type used, build rq_ids[] list */
1056 for (i = 0; i < hw->hw_rq_count; i++) {
1057 rq = hw->hw_rq[i];
1058 for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
1059 if (rq->filter_mask & (1U << j)) {
1060 if (rq_filter[j].rq_id != 0xffff) {
1061 /* Already used. Bailout ifts not RQset case */
1062 if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
1063 ocs_log_err(hw->os, "Wrong queue topology.\n");
1064 return OCS_HW_RTN_ERROR;
1065 }
1066 continue;
1067 }
1068
1069 if (rq->is_mrq) {
1070 rq_filter[j].rq_id = rq->base_mrq_id;
1071 mrq_bitmask |= (1U << j);
1072 } else {
1073 rq_filter[j].rq_id = rq->hdr->id;
1074 }
1075 }
1076 }
1077 }
1078
1079 issue_cmd:
1080 /* Invoke REG_FCFI_MRQ */
1081 rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
1082 buf, /* buf */
1083 SLI4_BMBX_SIZE, /* size */
1084 mode, /* mode 1 */
1085 fcf_index, /* fcf_index */
1086 vlanid, /* vlan_id */
1087 hw->config.rq_selection_policy, /* RQ selection policy*/
1088 mrq_bitmask, /* MRQ bitmask */
1089 hw->hw_mrq_count, /* num_mrqs */
1090 rq_filter); /* RQ filter */
1091 if (rc == 0) {
1092 ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
1093 return OCS_HW_RTN_ERROR;
1094 }
1095
1096 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
1097
1098 rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
1099
1100 if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
1101 ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
1102 rsp->hdr.command, rsp->hdr.status);
1103 return OCS_HW_RTN_ERROR;
1104 }
1105
1106 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1107 hw->fcf_indicator = rsp->fcfi;
1108 }
1109 return 0;
1110 }
1111
1112 /**
1113 * @brief Callback function for getting linkcfg during HW initialization.
1114 *
1115 * @param status Status of the linkcfg get operation.
1116 * @param value Link configuration enum to which the link configuration is set.
1117 * @param arg Callback argument (ocs_hw_t *).
1118 *
1119 * @return None.
1120 */
1121 static void
1122 ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
1123 {
1124 ocs_hw_t *hw = (ocs_hw_t *)arg;
1125 if (status == 0) {
1126 hw->linkcfg = (ocs_hw_linkcfg_e)value;
1127 } else {
1128 hw->linkcfg = OCS_HW_LINKCFG_NA;
1129 }
1130 ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
1131 }
1132
1133 /**
1134 * @ingroup devInitShutdown
1135 * @brief Tear down the Hardware Abstraction Layer module.
1136 *
1137 * @par Description
1138 * Frees memory structures needed by the device, and shuts down the device. Does
1139 * not free the HW context memory (which is done by the caller).
1140 *
1141 * @param hw Hardware context allocated by the caller.
1142 *
1143 * @return Returns 0 on success, or a non-zero value on failure.
1144 */
1145 ocs_hw_rtn_e
1146 ocs_hw_teardown(ocs_hw_t *hw)
1147 {
1148 uint32_t i = 0;
1149 uint32_t iters = 10;/*XXX*/
1150 uint32_t max_rpi;
1151 uint32_t destroy_queues;
1152 uint32_t free_memory;
1153
1154 if (!hw) {
1155 ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
1156 return OCS_HW_RTN_ERROR;
1157 }
1158
1159 destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
1160 free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
1161
1162 /* shutdown target wqe timer */
1163 shutdown_target_wqe_timer(hw);
1164
1165 /* Cancel watchdog timer if enabled */
1166 if(hw->watchdog_timeout) {
1167 hw->watchdog_timeout = 0;
1168 ocs_hw_config_watchdog_timer(hw);
1169 }
1170
1171 /* Cancel Sliport Healthcheck */
1172 if(hw->sliport_healthcheck) {
1173 hw->sliport_healthcheck = 0;
1174 ocs_hw_config_sli_port_health_check(hw, 0, 0);
1175 }
1176
1177 if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
1178 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1179
1180 ocs_hw_flush(hw);
1181
1182 /* If there are outstanding commands, wait for them to complete */
1183 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1184 ocs_udelay(10000);
1185 ocs_hw_flush(hw);
1186 iters--;
1187 }
1188
1189 if (ocs_list_empty(&hw->cmd_head)) {
1190 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1191 } else {
1192 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1193 }
1194
1195 /* Cancel any remaining commands */
1196 ocs_hw_command_cancel(hw);
1197 } else {
1198 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1199 }
1200
1201 ocs_lock_free(&hw->cmd_lock);
1202
1203 /* Free unregistered RPI if workaround is in force */
1204 if (hw->workaround.use_unregistered_rpi) {
1205 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
1206 }
1207
1208 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1209 if (hw->rpi_ref) {
1210 for (i = 0; i < max_rpi; i++) {
1211 if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
1212 ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
1213 i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
1214 }
1215 }
1216 ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
1217 hw->rpi_ref = NULL;
1218 }
1219
1220 ocs_dma_free(hw->os, &hw->rnode_mem);
1221
1222 if (hw->io) {
1223 for (i = 0; i < hw->config.n_io; i++) {
1224 if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
1225 (hw->io[i]->sgl->virt != NULL)) {
1226 if(hw->io[i]->is_port_owned) {
1227 ocs_lock_free(&hw->io[i]->axr_lock);
1228 }
1229 ocs_dma_free(hw->os, hw->io[i]->sgl);
1230 }
1231 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
1232 hw->io[i] = NULL;
1233 }
1234 ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
1235 hw->wqe_buffs = NULL;
1236 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
1237 hw->io = NULL;
1238 }
1239
1240 ocs_dma_free(hw->os, &hw->xfer_rdy);
1241 ocs_dma_free(hw->os, &hw->dump_sges);
1242 ocs_dma_free(hw->os, &hw->loop_map);
1243
1244 ocs_lock_free(&hw->io_lock);
1245 ocs_lock_free(&hw->io_abort_lock);
1246
1247 for (i = 0; i < hw->wq_count; i++) {
1248 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
1249 }
1250
1251 for (i = 0; i < hw->rq_count; i++) {
1252 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
1253 }
1254
1255 for (i = 0; i < hw->mq_count; i++) {
1256 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
1257 }
1258
1259 for (i = 0; i < hw->cq_count; i++) {
1260 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
1261 }
1262
1263 for (i = 0; i < hw->eq_count; i++) {
1264 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
1265 }
1266
1267 ocs_hw_qtop_free(hw->qtop);
1268
1269 /* Free rq buffers */
1270 ocs_hw_rx_free(hw);
1271
1272 hw_queue_teardown(hw);
1273
1274 ocs_hw_rqpair_teardown(hw);
1275
1276 if (sli_teardown(&hw->sli)) {
1277 ocs_log_err(hw->os, "SLI teardown failed\n");
1278 }
1279
1280 ocs_queue_history_free(&hw->q_hist);
1281
1282 /* record the fact that the queues are non-functional */
1283 hw->state = OCS_HW_STATE_UNINITIALIZED;
1284
1285 /* free sequence free pool */
1286 ocs_array_free(hw->seq_pool);
1287 hw->seq_pool = NULL;
1288
1289 /* free hw_wq_callback pool */
1290 ocs_pool_free(hw->wq_reqtag_pool);
1291
1292 ocs_dma_free(hw->os, &hw->domain_dmem);
1293 ocs_dma_free(hw->os, &hw->fcf_dmem);
1294 /* Mark HW setup as not having been called */
1295 hw->hw_setup_called = FALSE;
1296
1297 return OCS_HW_RTN_SUCCESS;
1298 }
1299
1300 ocs_hw_rtn_e
1301 ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
1302 {
1303 uint32_t i;
1304 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1305 uint32_t iters;
1306 ocs_hw_state_e prev_state = hw->state;
1307
1308 if (hw->state != OCS_HW_STATE_ACTIVE) {
1309 ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
1310 }
1311
1312 hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
1313
1314 /* shutdown target wqe timer */
1315 shutdown_target_wqe_timer(hw);
1316
1317 ocs_hw_flush(hw);
1318
1319 /*
1320 * If an mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
1321 * then the FW will UE when the reset is issued. So attempt to complete
1322 * all mailbox commands.
1323 */
1324 iters = 10;
1325 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1326 ocs_udelay(10000);
1327 ocs_hw_flush(hw);
1328 iters--;
1329 }
1330
1331 if (ocs_list_empty(&hw->cmd_head)) {
1332 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1333 } else {
1334 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1335 }
1336
1337 /* Reset the chip */
1338 switch(reset) {
1339 case OCS_HW_RESET_FUNCTION:
1340 ocs_log_debug(hw->os, "issuing function level reset\n");
1341 if (sli_reset(&hw->sli)) {
1342 ocs_log_err(hw->os, "sli_reset failed\n");
1343 rc = OCS_HW_RTN_ERROR;
1344 }
1345 break;
1346 case OCS_HW_RESET_FIRMWARE:
1347 ocs_log_debug(hw->os, "issuing firmware reset\n");
1348 if (sli_fw_reset(&hw->sli)) {
1349 ocs_log_err(hw->os, "sli_soft_reset failed\n");
1350 rc = OCS_HW_RTN_ERROR;
1351 }
1352 /*
1353 * Because the FW reset leaves the FW in a non-running state,
1354 * follow that with a regular reset.
1355 */
1356 ocs_log_debug(hw->os, "issuing function level reset\n");
1357 if (sli_reset(&hw->sli)) {
1358 ocs_log_err(hw->os, "sli_reset failed\n");
1359 rc = OCS_HW_RTN_ERROR;
1360 }
1361 break;
1362 default:
1363 ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
1364 hw->state = prev_state;
1365 return OCS_HW_RTN_ERROR;
1366 }
1367
1368 /* Not safe to walk command/io lists unless they've been initialized */
1369 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1370 ocs_hw_command_cancel(hw);
1371
1372 /* Clean up the inuse list, the free list and the wait free list */
1373 ocs_hw_io_cancel(hw);
1374
1375 ocs_memset(hw->domains, 0, sizeof(hw->domains));
1376 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
1377
1378 ocs_hw_link_event_init(hw);
1379
1380 ocs_lock(&hw->io_lock);
1381 /* The io lists should be empty, but remove any that didn't get cleaned up. */
1382 while (!ocs_list_empty(&hw->io_timed_wqe)) {
1383 ocs_list_remove_head(&hw->io_timed_wqe);
1384 }
1385 /* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
1386
1387 while (!ocs_list_empty(&hw->io_free)) {
1388 ocs_list_remove_head(&hw->io_free);
1389 }
1390 while (!ocs_list_empty(&hw->io_wait_free)) {
1391 ocs_list_remove_head(&hw->io_wait_free);
1392 }
1393
1394 /* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
1395 ocs_hw_reqtag_reset(hw);
1396
1397 ocs_unlock(&hw->io_lock);
1398 }
1399
1400 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1401 for (i = 0; i < hw->wq_count; i++) {
1402 sli_queue_reset(&hw->sli, &hw->wq[i]);
1403 }
1404
1405 for (i = 0; i < hw->rq_count; i++) {
1406 sli_queue_reset(&hw->sli, &hw->rq[i]);
1407 }
1408
1409 for (i = 0; i < hw->hw_rq_count; i++) {
1410 hw_rq_t *rq = hw->hw_rq[i];
1411 if (rq->rq_tracker != NULL) {
1412 uint32_t j;
1413
1414 for (j = 0; j < rq->entry_count; j++) {
1415 rq->rq_tracker[j] = NULL;
1416 }
1417 }
1418 }
1419
1420 for (i = 0; i < hw->mq_count; i++) {
1421 sli_queue_reset(&hw->sli, &hw->mq[i]);
1422 }
1423
1424 for (i = 0; i < hw->cq_count; i++) {
1425 sli_queue_reset(&hw->sli, &hw->cq[i]);
1426 }
1427
1428 for (i = 0; i < hw->eq_count; i++) {
1429 sli_queue_reset(&hw->sli, &hw->eq[i]);
1430 }
1431
1432 /* Free rq buffers */
1433 ocs_hw_rx_free(hw);
1434
1435 /* Teardown the HW queue topology */
1436 hw_queue_teardown(hw);
1437 } else {
1438 /* Free rq buffers */
1439 ocs_hw_rx_free(hw);
1440 }
1441
1442 /*
1443 * Re-apply the run-time workarounds after clearing the SLI config
1444 * fields in sli_reset.
1445 */
1446 ocs_hw_workaround_setup(hw);
1447 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
1448
1449 return rc;
1450 }
1451
1452 int32_t
1453 ocs_hw_get_num_eq(ocs_hw_t *hw)
1454 {
1455 return hw->eq_count;
1456 }
1457
1458 static int32_t
1459 ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
1460 {
1461 /* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf
1462 * No further explanation is given in the document.
1463 * */
1464 return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
1465 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
1466 }
1467
1468 ocs_hw_rtn_e
1469 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1470 {
1471 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1472 int32_t tmp;
1473
1474 if (!value) {
1475 return OCS_HW_RTN_ERROR;
1476 }
1477
1478 *value = 0;
1479
1480 switch (prop) {
1481 case OCS_HW_N_IO:
1482 *value = hw->config.n_io;
1483 break;
1484 case OCS_HW_N_SGL:
1485 *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
1486 break;
1487 case OCS_HW_MAX_IO:
1488 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1489 break;
1490 case OCS_HW_MAX_NODES:
1491 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1492 break;
1493 case OCS_HW_MAX_RQ_ENTRIES:
1494 *value = hw->num_qentries[SLI_QTYPE_RQ];
1495 break;
1496 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1497 *value = hw->config.rq_default_buffer_size;
1498 break;
1499 case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1500 *value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1501 break;
1502 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1503 *value = hw->config.auto_xfer_rdy_xri_cnt;
1504 break;
1505 case OCS_HW_AUTO_XFER_RDY_SIZE:
1506 *value = hw->config.auto_xfer_rdy_size;
1507 break;
1508 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1509 switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1510 case 0:
1511 *value = 512;
1512 break;
1513 case 1:
1514 *value = 1024;
1515 break;
1516 case 2:
1517 *value = 2048;
1518 break;
1519 case 3:
1520 *value = 4096;
1521 break;
1522 case 4:
1523 *value = 520;
1524 break;
1525 default:
1526 *value = 0;
1527 rc = OCS_HW_RTN_ERROR;
1528 break;
1529 }
1530 break;
1531 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1532 *value = hw->config.auto_xfer_rdy_t10_enable;
1533 break;
1534 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1535 *value = hw->config.auto_xfer_rdy_p_type;
1536 break;
1537 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1538 *value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1539 break;
1540 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1541 *value = hw->config.auto_xfer_rdy_app_tag_valid;
1542 break;
1543 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1544 *value = hw->config.auto_xfer_rdy_app_tag_value;
1545 break;
1546 case OCS_HW_MAX_SGE:
1547 *value = sli_get_max_sge(&hw->sli);
1548 break;
1549 case OCS_HW_MAX_SGL:
1550 *value = sli_get_max_sgl(&hw->sli);
1551 break;
1552 case OCS_HW_TOPOLOGY:
1553 /*
1554 * Infer link.status based on link.speed.
1555 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1556 */
1557 if (hw->link.speed == 0) {
1558 *value = OCS_HW_TOPOLOGY_NONE;
1559 break;
1560 }
1561 switch (hw->link.topology) {
1562 case SLI_LINK_TOPO_NPORT:
1563 *value = OCS_HW_TOPOLOGY_NPORT;
1564 break;
1565 case SLI_LINK_TOPO_LOOP:
1566 *value = OCS_HW_TOPOLOGY_LOOP;
1567 break;
1568 case SLI_LINK_TOPO_NONE:
1569 *value = OCS_HW_TOPOLOGY_NONE;
1570 break;
1571 default:
1572 ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1573 rc = OCS_HW_RTN_ERROR;
1574 break;
1575 }
1576 break;
1577 case OCS_HW_CONFIG_TOPOLOGY:
1578 *value = hw->config.topology;
1579 break;
1580 case OCS_HW_LINK_SPEED:
1581 *value = hw->link.speed;
1582 break;
1583 case OCS_HW_LINK_CONFIG_SPEED:
1584 switch (hw->config.speed) {
1585 case FC_LINK_SPEED_10G:
1586 *value = 10000;
1587 break;
1588 case FC_LINK_SPEED_AUTO_16_8_4:
1589 *value = 0;
1590 break;
1591 case FC_LINK_SPEED_2G:
1592 *value = 2000;
1593 break;
1594 case FC_LINK_SPEED_4G:
1595 *value = 4000;
1596 break;
1597 case FC_LINK_SPEED_8G:
1598 *value = 8000;
1599 break;
1600 case FC_LINK_SPEED_16G:
1601 *value = 16000;
1602 break;
1603 case FC_LINK_SPEED_32G:
1604 *value = 32000;
1605 break;
1606 default:
1607 ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1608 rc = OCS_HW_RTN_ERROR;
1609 break;
1610 }
1611 break;
1612 case OCS_HW_IF_TYPE:
1613 *value = sli_get_if_type(&hw->sli);
1614 break;
1615 case OCS_HW_SLI_REV:
1616 *value = sli_get_sli_rev(&hw->sli);
1617 break;
1618 case OCS_HW_SLI_FAMILY:
1619 *value = sli_get_sli_family(&hw->sli);
1620 break;
1621 case OCS_HW_DIF_CAPABLE:
1622 *value = sli_get_dif_capable(&hw->sli);
1623 break;
1624 case OCS_HW_DIF_SEED:
1625 *value = hw->config.dif_seed;
1626 break;
1627 case OCS_HW_DIF_MODE:
1628 *value = hw->config.dif_mode;
1629 break;
1630 case OCS_HW_DIF_MULTI_SEPARATE:
1631 /* Lancer supports multiple DIF separates */
1632 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1633 *value = TRUE;
1634 } else {
1635 *value = FALSE;
1636 }
1637 break;
1638 case OCS_HW_DUMP_MAX_SIZE:
1639 *value = hw->dump_size;
1640 break;
1641 case OCS_HW_DUMP_READY:
1642 *value = sli_dump_is_ready(&hw->sli);
1643 break;
1644 case OCS_HW_DUMP_PRESENT:
1645 *value = sli_dump_is_present(&hw->sli);
1646 break;
1647 case OCS_HW_RESET_REQUIRED:
1648 tmp = sli_reset_required(&hw->sli);
1649 if(tmp < 0) {
1650 rc = OCS_HW_RTN_ERROR;
1651 } else {
1652 *value = tmp;
1653 }
1654 break;
1655 case OCS_HW_FW_ERROR:
1656 *value = sli_fw_error_status(&hw->sli);
1657 break;
1658 case OCS_HW_FW_READY:
1659 *value = sli_fw_ready(&hw->sli);
1660 break;
1661 case OCS_HW_FW_TIMED_OUT:
1662 *value = ocs_hw_get_fw_timed_out(hw);
1663 break;
1664 case OCS_HW_HIGH_LOGIN_MODE:
1665 *value = sli_get_hlm_capable(&hw->sli);
1666 break;
1667 case OCS_HW_PREREGISTER_SGL:
1668 *value = sli_get_sgl_preregister_required(&hw->sli);
1669 break;
1670 case OCS_HW_HW_REV1:
1671 *value = sli_get_hw_revision(&hw->sli, 0);
1672 break;
1673 case OCS_HW_HW_REV2:
1674 *value = sli_get_hw_revision(&hw->sli, 1);
1675 break;
1676 case OCS_HW_HW_REV3:
1677 *value = sli_get_hw_revision(&hw->sli, 2);
1678 break;
1679 case OCS_HW_LINKCFG:
1680 *value = hw->linkcfg;
1681 break;
1682 case OCS_HW_ETH_LICENSE:
1683 *value = hw->eth_license;
1684 break;
1685 case OCS_HW_LINK_MODULE_TYPE:
1686 *value = sli_get_link_module_type(&hw->sli);
1687 break;
1688 case OCS_HW_NUM_CHUTES:
1689 *value = ocs_hw_get_num_chutes(hw);
1690 break;
1691 case OCS_HW_DISABLE_AR_TGT_DIF:
1692 *value = hw->workaround.disable_ar_tgt_dif;
1693 break;
1694 case OCS_HW_EMULATE_I_ONLY_AAB:
1695 *value = hw->config.i_only_aab;
1696 break;
1697 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1698 *value = hw->config.emulate_tgt_wqe_timeout;
1699 break;
1700 case OCS_HW_VPD_LEN:
1701 *value = sli_get_vpd_len(&hw->sli);
1702 break;
1703 case OCS_HW_SGL_CHAINING_CAPABLE:
1704 *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1705 break;
1706 case OCS_HW_SGL_CHAINING_ALLOWED:
1707 /*
1708 * SGL Chaining is allowed in the following cases:
1709 * 1. Lancer with host SGL Lists
1710 * 2. Skyhawk with pre-registered SGL Lists
1711 */
1712 *value = FALSE;
1713 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1714 !sli_get_sgl_preregister(&hw->sli) &&
1715 SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
1716 *value = TRUE;
1717 }
1718
1719 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1720 sli_get_sgl_preregister(&hw->sli) &&
1721 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1722 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1723 *value = TRUE;
1724 }
1725 break;
1726 case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1727 /* Only lancer supports host allocated SGL Chaining buffers. */
1728 *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1729 (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
1730 break;
1731 case OCS_HW_SEND_FRAME_CAPABLE:
1732 if (hw->workaround.ignore_send_frame) {
1733 *value = 0;
1734 } else {
1735 /* Only lancer is capable */
1736 *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1737 }
1738 break;
1739 case OCS_HW_RQ_SELECTION_POLICY:
1740 *value = hw->config.rq_selection_policy;
1741 break;
1742 case OCS_HW_RR_QUANTA:
1743 *value = hw->config.rr_quanta;
1744 break;
1745 case OCS_HW_MAX_VPORTS:
1746 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1747 break;
1748 default:
1749 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1750 rc = OCS_HW_RTN_ERROR;
1751 }
1752
1753 return rc;
1754 }
1755
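/**
 * @brief Read a pointer-valued hardware property.
 *
 * @param hw Hardware context.
 * @param prop Property to read (WWNs, VPD, firmware names, and so on).
 *
 * @return Returns the requested pointer, or NULL if the property is
 * unsupported or unavailable.
 */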
1756 void *
1757 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1758 {
1759 void *rc = NULL;
1760
1761 switch (prop) {
1762 case OCS_HW_WWN_NODE:
1763 rc = sli_get_wwn_node(&hw->sli);
1764 break;
1765 case OCS_HW_WWN_PORT:
1766 rc = sli_get_wwn_port(&hw->sli);
1767 break;
1768 case OCS_HW_VPD:
1769 /* make sure VPD length is non-zero */
1770 if (sli_get_vpd_len(&hw->sli)) {
1771 rc = sli_get_vpd(&hw->sli);
1772 }
1773 break;
1774 case OCS_HW_FW_REV:
1775 rc = sli_get_fw_name(&hw->sli, 0);
1776 break;
1777 case OCS_HW_FW_REV2:
1778 rc = sli_get_fw_name(&hw->sli, 1);
1779 break;
1780 case OCS_HW_IPL:
1781 rc = sli_get_ipl_name(&hw->sli);
1782 break;
1783 case OCS_HW_PORTNUM:
1784 rc = sli_get_portnum(&hw->sli);
1785 break;
1786 case OCS_HW_BIOS_VERSION_STRING:
1787 rc = sli_get_bios_version_string(&hw->sli);
1788 break;
1789 default:
1790 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1791 }
1792
1793 return rc;
1794 }
1795
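/**
 * @brief Set an unsigned 32-bit hardware property.
 *
 * @par Description
 * Values are validated against the corresponding SLI limit where one
 * applies (for example, OCS_HW_N_IO against the FCOE_XRI resource count).
 *
 * @param hw Hardware context.
 * @param prop Property to set.
 * @param value New value.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */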
1796 ocs_hw_rtn_e
1797 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1798 {
1799 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1800
1801 switch (prop) {
1802 case OCS_HW_N_IO:
1803 if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1804 value == 0) {
1805 ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1806 value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1807 rc = OCS_HW_RTN_ERROR;
1808 } else {
1809 hw->config.n_io = value;
1810 }
1811 break;
1812 case OCS_HW_N_SGL:
1813 value += SLI4_SGE_MAX_RESERVED;
1814 if (value > sli_get_max_sgl(&hw->sli)) {
1815 ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1816 value, sli_get_max_sgl(&hw->sli));
1817 rc = OCS_HW_RTN_ERROR;
1818 } else {
1819 hw->config.n_sgl = value;
1820 }
1821 break;
1822 case OCS_HW_TOPOLOGY:
1823 if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1824 (value != OCS_HW_TOPOLOGY_AUTO)) {
1825 ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1826 value, sli_get_medium(&hw->sli));
1827 rc = OCS_HW_RTN_ERROR;
1828 break;
1829 }
1830
1831 switch (value) {
1832 case OCS_HW_TOPOLOGY_AUTO:
1833 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1834 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1835 } else {
1836 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1837 }
1838 break;
1839 case OCS_HW_TOPOLOGY_NPORT:
1840 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1841 break;
1842 case OCS_HW_TOPOLOGY_LOOP:
1843 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1844 break;
		default:
			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
			rc = OCS_HW_RTN_ERROR;
		}

		/* Only record the topology if it was accepted above */
		if (rc == OCS_HW_RTN_SUCCESS) {
			hw->config.topology = value;
		}
1850 break;
1851 case OCS_HW_LINK_SPEED:
1852 if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1853 switch (value) {
1854 case 0: /* Auto-speed negotiation */
1855 case 10000: /* FCoE speed */
1856 hw->config.speed = FC_LINK_SPEED_10G;
1857 break;
1858 default:
1859 ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1860 value, sli_get_medium(&hw->sli));
1861 rc = OCS_HW_RTN_ERROR;
1862 }
1863 break;
1864 }
1865
1866 switch (value) {
1867 case 0: /* Auto-speed negotiation */
1868 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1869 break;
1870 case 2000: /* FC speeds */
1871 hw->config.speed = FC_LINK_SPEED_2G;
1872 break;
1873 case 4000:
1874 hw->config.speed = FC_LINK_SPEED_4G;
1875 break;
1876 case 8000:
1877 hw->config.speed = FC_LINK_SPEED_8G;
1878 break;
1879 case 16000:
1880 hw->config.speed = FC_LINK_SPEED_16G;
1881 break;
1882 case 32000:
1883 hw->config.speed = FC_LINK_SPEED_32G;
1884 break;
1885 default:
1886 ocs_log_test(hw->os, "unsupported speed %d\n", value);
1887 rc = OCS_HW_RTN_ERROR;
1888 }
1889 break;
1890 case OCS_HW_DIF_SEED:
1891 /* Set the DIF seed - only for lancer right now */
1892 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1893 ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1894 rc = OCS_HW_RTN_ERROR;
1895 } else {
1896 hw->config.dif_seed = value;
1897 }
1898 break;
1899 case OCS_HW_DIF_MODE:
1900 switch (value) {
1901 case OCS_HW_DIF_MODE_INLINE:
1902 /*
1903 * Make sure we support inline DIF.
1904 *
1905 * Note: Having both bits clear means that we have old
1906 * FW that doesn't set the bits.
1907 */
1908 if (sli_is_dif_inline_capable(&hw->sli)) {
1909 hw->config.dif_mode = value;
1910 } else {
1911 ocs_log_test(hw->os, "chip does not support DIF inline\n");
1912 rc = OCS_HW_RTN_ERROR;
1913 }
1914 break;
1915 case OCS_HW_DIF_MODE_SEPARATE:
1916 /* Make sure we support DIF separates. */
1917 if (sli_is_dif_separate_capable(&hw->sli)) {
1918 hw->config.dif_mode = value;
1919 } else {
1920 ocs_log_test(hw->os, "chip does not support DIF separate\n");
1921 rc = OCS_HW_RTN_ERROR;
1922 }
			break;
		default:
			ocs_log_test(hw->os, "unsupported DIF mode %#x\n", value);
			rc = OCS_HW_RTN_ERROR;
		}
1924 break;
1925 case OCS_HW_RQ_PROCESS_LIMIT: {
1926 hw_rq_t *rq;
1927 uint32_t i;
1928
1929 /* For each hw_rq object, set its parent CQ limit value */
1930 for (i = 0; i < hw->hw_rq_count; i++) {
1931 rq = hw->hw_rq[i];
1932 hw->cq[rq->cq->instance].proc_limit = value;
1933 }
1934 break;
1935 }
1936 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1937 hw->config.rq_default_buffer_size = value;
1938 break;
1939 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1940 hw->config.auto_xfer_rdy_xri_cnt = value;
1941 break;
1942 case OCS_HW_AUTO_XFER_RDY_SIZE:
1943 hw->config.auto_xfer_rdy_size = value;
1944 break;
1945 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1946 switch (value) {
1947 case 512:
1948 hw->config.auto_xfer_rdy_blk_size_chip = 0;
1949 break;
1950 case 1024:
1951 hw->config.auto_xfer_rdy_blk_size_chip = 1;
1952 break;
1953 case 2048:
1954 hw->config.auto_xfer_rdy_blk_size_chip = 2;
1955 break;
1956 case 4096:
1957 hw->config.auto_xfer_rdy_blk_size_chip = 3;
1958 break;
1959 case 520:
1960 hw->config.auto_xfer_rdy_blk_size_chip = 4;
1961 break;
1962 default:
1963 ocs_log_err(hw->os, "Invalid block size %d\n",
1964 value);
1965 rc = OCS_HW_RTN_ERROR;
1966 }
1967 break;
1968 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1969 hw->config.auto_xfer_rdy_t10_enable = value;
1970 break;
1971 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1972 hw->config.auto_xfer_rdy_p_type = value;
1973 break;
1974 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1975 hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1976 break;
1977 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1978 hw->config.auto_xfer_rdy_app_tag_valid = value;
1979 break;
1980 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1981 hw->config.auto_xfer_rdy_app_tag_value = value;
1982 break;
1983 case OCS_ESOC:
1984 hw->config.esoc = value;
1985 break;
1986 case OCS_HW_HIGH_LOGIN_MODE:
1987 rc = sli_set_hlm(&hw->sli, value);
1988 break;
1989 case OCS_HW_PREREGISTER_SGL:
1990 rc = sli_set_sgl_preregister(&hw->sli, value);
1991 break;
1992 case OCS_HW_ETH_LICENSE:
1993 hw->eth_license = value;
1994 break;
1995 case OCS_HW_EMULATE_I_ONLY_AAB:
1996 hw->config.i_only_aab = value;
1997 break;
1998 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1999 hw->config.emulate_tgt_wqe_timeout = value;
2000 break;
2001 case OCS_HW_BOUNCE:
2002 hw->config.bounce = value;
2003 break;
2004 case OCS_HW_RQ_SELECTION_POLICY:
2005 hw->config.rq_selection_policy = value;
2006 break;
2007 case OCS_HW_RR_QUANTA:
2008 hw->config.rr_quanta = value;
2009 break;
2010 default:
2011 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2012 rc = OCS_HW_RTN_ERROR;
2013 }
2014
2015 return rc;
2016 }
2017
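/**
 * @brief Set a pointer-valued hardware property.
 *
 * @par Description
 * For OCS_HW_FILTER_DEF, @c value is a comma-separated string of numeric
 * filter definitions (for example "1,2"; illustrative values only).
 *
 * @param hw Hardware context.
 * @param prop Property to set.
 * @param value New value.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */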
2018 ocs_hw_rtn_e
2019 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2020 {
2021 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2022
2023 switch (prop) {
2024 case OCS_HW_WAR_VERSION:
2025 hw->hw_war_version = value;
2026 break;
2027 case OCS_HW_FILTER_DEF: {
2028 char *p = value;
2029 uint32_t idx = 0;
2030
2031 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2032 hw->config.filter_def[idx] = 0;
2033 }
2034
2035 for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2036 hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2037 p = ocs_strchr(p, ',');
2038 if (p != NULL) {
2039 p++;
2040 }
2041 }
2042
2043 break;
2044 }
2045 default:
2046 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2047 rc = OCS_HW_RTN_ERROR;
2048 break;
2049 }
2050 return rc;
}

/**
2053 * @ingroup interrupt
2054 * @brief Check for the events associated with the interrupt vector.
2055 *
2056 * @param hw Hardware context.
2057 * @param vector Zero-based interrupt vector number.
2058 *
 * @return Returns 0 if the EQ has entries to process, or a non-zero value
 * if the EQ is empty (in which case it is re-armed) or on error.
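 *
 * @par Example
 * A minimal interrupt-handler sketch; @c my_intr_ctx_t, its fields, and
 * @c MY_MAX_ISR_MSEC are hypothetical driver-side names:
 * @code
 * static void
 * my_isr(void *arg)
 * {
 *	my_intr_ctx_t *ctx = arg;
 *
 *	// A zero return means the EQ has entries to process
 *	if (ocs_hw_event_check(ctx->hw, ctx->vector) == 0) {
 *		ocs_hw_process(ctx->hw, ctx->vector, MY_MAX_ISR_MSEC);
 *	}
 * }
 * @endcode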
2060 */
2061 int32_t
2062 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2063 {
2064 int32_t rc = 0;
2065
2066 if (!hw) {
2067 ocs_log_err(NULL, "HW context NULL?!?\n");
2068 return -1;
2069 }
2070
	if (vector >= hw->eq_count) {
		ocs_log_err(hw->os, "vector %d out of range, max %d\n",
				vector, hw->eq_count);
2074 return -1;
2075 }
2076
2077 /*
2078 * The caller should disable interrupts if they wish to prevent us
2079 * from processing during a shutdown. The following states are defined:
2080 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2081 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2082 * queues are cleared.
2083 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2084 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2085 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2086 * completions.
2087 */
2088 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2089 rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2090
2091 /* Re-arm queue if there are no entries */
2092 if (rc != 0) {
2093 sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2094 }
2095 }
2096 return rc;
2097 }
2098
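/**
 * @ingroup interrupt
 * @brief Deliver an unsolicited sequence from the bounce context.
 *
 * @par Description
 * Invoked in the deferred (bounce) context to hand a received unsolicited
 * sequence to the callback registered via OCS_HW_CB_UNSOLICITED.
 *
 * @param arg Pointer to the ocs_hw_sequence_t to deliver.
 */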
2099 void
2100 ocs_hw_unsol_process_bounce(void *arg)
2101 {
2102 ocs_hw_sequence_t *seq = arg;
2103 ocs_hw_t *hw = seq->hw;
2104
2105 ocs_hw_assert(hw != NULL);
2106 ocs_hw_assert(hw->callback.unsolicited != NULL);
2107
2108 hw->callback.unsolicited(hw->args.unsolicited, seq);
2109 }
2110
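/**
 * @ingroup interrupt
 * @brief Process events associated with the given interrupt vector.
 *
 * @par Description
 * Looks up the EQ mapped to @c vector and processes its entries; see
 * ocs_hw_eq_process() for the time-bounding behavior.
 *
 * @param hw Hardware context.
 * @param vector Zero-based interrupt vector number.
 * @param max_isr_time_msec Maximum time in msec to stay in this function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */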
2111 int32_t
2112 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2113 {
2114 hw_eq_t *eq;
2115 int32_t rc = 0;
2116
2117 CPUTRACE("");
2118
2119 /*
2120 * The caller should disable interrupts if they wish to prevent us
2121 * from processing during a shutdown. The following states are defined:
2122 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2123 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2124 * queues are cleared.
2125 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2126 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2127 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2128 * completions.
2129 */
2130 if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2131 return 0;
2132 }
2133
2134 /* Get pointer to hw_eq_t */
2135 eq = hw->hw_eq[vector];
2136
2137 OCS_STAT(eq->use_count++);
2138
2139 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2140
2141 return rc;
2142 }
2143
2144 /**
2145 * @ingroup interrupt
2146 * @brief Process events associated with an EQ.
2147 *
2148 * @par Description
2149 * Loop termination:
2150 * @n @n Without a mechanism to terminate the completion processing loop, it
2151 * is possible under some workload conditions for the loop to never terminate
2152 * (or at least take longer than the OS is happy to have an interrupt handler
2153 * or kernel thread context hold a CPU without yielding).
 * @n @n The approach taken here is to periodically check how much time
 * we have spent in this processing loop, and if it exceeds the
 * caller-supplied limit (@c max_isr_time_msec), the loop is terminated
 * and ocs_hw_eq_process() returns.
2158 *
2159 * @param hw Hardware context.
2160 * @param eq Pointer to HW EQ object.
2161 * @param max_isr_time_msec Maximum time in msec to stay in this function.
2162 *
2163 * @return Returns 0 on success, or a non-zero value on failure.
2164 */
2165 int32_t
2166 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2167 {
2168 uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
2169 uint32_t done = FALSE;
2170 uint32_t tcheck_count;
2171 time_t tstart;
2172 time_t telapsed;
2173
2174 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2175 tstart = ocs_msectime();
2176
2177 CPUTRACE("");
2178
2179 while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2180 uint16_t cq_id = 0;
2181 int32_t rc;
2182
2183 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2184 if (unlikely(rc)) {
2185 if (rc > 0) {
2186 uint32_t i;
2187
2188 /*
2189 * Received a sentinel EQE indicating the EQ is full.
2190 * Process all CQs
2191 */
2192 for (i = 0; i < hw->cq_count; i++) {
2193 ocs_hw_cq_process(hw, hw->hw_cq[i]);
2194 }
2195 continue;
2196 } else {
2197 return rc;
2198 }
2199 } else {
2200 int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2201 if (likely(index >= 0)) {
2202 ocs_hw_cq_process(hw, hw->hw_cq[index]);
2203 } else {
2204 ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2205 }
2206 }
2207
2208 if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2209 sli_queue_arm(&hw->sli, eq->queue, FALSE);
2210 }
2211
2212 if (tcheck_count && (--tcheck_count == 0)) {
2213 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2214 telapsed = ocs_msectime() - tstart;
2215 if (telapsed >= max_isr_time_msec) {
2216 done = TRUE;
2217 }
2218 }
2219 }
2220 sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2221
2222 return 0;
2223 }
2224
2225 /**
2226 * @brief Submit queued (pending) mbx commands.
2227 *
2228 * @par Description
2229 * Submit queued mailbox commands.
2230 * --- Assumes that hw->cmd_lock is held ---
2231 *
2232 * @param hw Hardware context.
2233 *
2234 * @return Returns 0 on success, or a negative error code value on failure.
2235 */
2236 static int32_t
2237 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2238 {
2239 ocs_command_ctx_t *ctx;
2240 int32_t rc = 0;
2241
2242 /* Assumes lock held */
2243
2244 /* Only submit MQE if there's room */
2245 while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2246 ctx = ocs_list_remove_head(&hw->cmd_pending);
2247 if (ctx == NULL) {
2248 break;
2249 }
2250 ocs_list_add_tail(&hw->cmd_head, ctx);
2251 hw->cmd_head_count++;
2252 if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
			ocs_log_test(hw->os, "sli_queue_write failed\n");
2254 rc = -1;
2255 break;
2256 }
2257 }
2258 return rc;
2259 }
2260
2261 /**
2262 * @ingroup io
2263 * @brief Issue a SLI command.
2264 *
2265 * @par Description
2266 * Send a mailbox command to the hardware, and either wait for a completion
2267 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2268 *
2269 * @param hw Hardware context.
2270 * @param cmd Buffer containing a formatted command and results.
2271 * @param opts Command options:
2272 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2273 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2274 * @param cb Function callback used for asynchronous mode. May be NULL.
2275 * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
 * @n @n @b Note: If the
 * callback function pointer is NULL, the results of the command are silently
 * discarded, allowing the command buffer to exist solely on the stack.
2279 * @param arg Argument passed to an asynchronous callback.
2280 *
2281 * @return Returns 0 on success, or a non-zero value on failure.
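 *
 * @par Example
 * An illustrative sketch; @c my_done, @c my_arg, and @c heap_buf are
 * hypothetical caller-side names, and the command is assumed to have been
 * formatted by one of the sli_cmd_*() helpers:
 * @code
 * uint8_t buf[SLI4_BMBX_SIZE] = { 0 };
 *
 * // ... format a mailbox command into buf ...
 *
 * // Synchronous: busy-waits; the response is copied back into buf.
 * rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
 *
 * // Asynchronous: my_done(my_arg, cmd) runs on completion; the buffer
 * // must remain valid until then when a callback is supplied.
 * rc = ocs_hw_command(hw, heap_buf, OCS_CMD_NOWAIT, my_done, my_arg);
 * @endcode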
2282 */
2283 ocs_hw_rtn_e
2284 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2285 {
2286 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2287
2288 /*
2289 * If the chip is in an error state (UE'd) then reject this mailbox
2290 * command.
2291 */
2292 if (sli_fw_error_status(&hw->sli) > 0) {
2293 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2294 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2295 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2296 hw->expiration_logged = 1;
			ocs_log_crit(hw->os, "Emulex: Heartbeat expired after %d seconds\n",
2298 hw->watchdog_timeout);
2299 }
2300 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2301 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2302 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2303 err1, err2);
2304
2305 return OCS_HW_RTN_ERROR;
2306 }
2307
2308 if (OCS_CMD_POLL == opts) {
2309 ocs_lock(&hw->cmd_lock);
2310 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
			/*
			 * Can't issue a bootstrap mailbox command while other
			 * mailbox queue commands are pending, as this
			 * interaction is undefined.
			 */
2316 rc = OCS_HW_RTN_ERROR;
2317 } else {
2318 void *bmbx = hw->sli.bmbx.virt;
2319
2320 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2321 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2322
2323 if (sli_bmbx_command(&hw->sli) == 0) {
2324 rc = OCS_HW_RTN_SUCCESS;
2325 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2326 }
2327 }
2328 ocs_unlock(&hw->cmd_lock);
2329 } else if (OCS_CMD_NOWAIT == opts) {
2330 ocs_command_ctx_t *ctx = NULL;
2331
2332 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2333 if (!ctx) {
2334 ocs_log_err(hw->os, "can't allocate command context\n");
2335 return OCS_HW_RTN_NO_RESOURCES;
2336 }
2337
2338 if (hw->state != OCS_HW_STATE_ACTIVE) {
2339 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2340 ocs_free(hw->os, ctx, sizeof(*ctx));
2341 return OCS_HW_RTN_ERROR;
2342 }
2343
2344 if (cb) {
2345 ctx->cb = cb;
2346 ctx->arg = arg;
2347 }
2348 ctx->buf = cmd;
2349 ctx->ctx = hw;
2350
2351 ocs_lock(&hw->cmd_lock);
2352
2353 /* Add to pending list */
2354 ocs_list_add_tail(&hw->cmd_pending, ctx);
2355
2356 /* Submit as much of the pending list as we can */
2357 if (ocs_hw_cmd_submit_pending(hw) == 0) {
2358 rc = OCS_HW_RTN_SUCCESS;
2359 }
2360
2361 ocs_unlock(&hw->cmd_lock);
2362 }
2363
2364 return rc;
2365 }
2366
2367 /**
2368 * @ingroup devInitShutdown
2369 * @brief Register a callback for the given event.
2370 *
2371 * @param hw Hardware context.
2372 * @param which Event of interest.
2373 * @param func Function to call when the event occurs.
2374 * @param arg Argument passed to the callback function.
2375 *
2376 * @return Returns 0 on success, or a non-zero value on failure.
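 *
 * @par Example
 * Registering an unsolicited-frame handler; @c my_unsol_cb and @c my_ocs
 * are hypothetical caller-side names, and the callback signature is
 * inferred from the dispatch in ocs_hw_unsol_process_bounce():
 * @code
 * static int32_t
 * my_unsol_cb(void *arg, ocs_hw_sequence_t *seq)
 * {
 *	// ... consume or forward the received sequence ...
 *	return 0;
 * }
 *
 * ocs_hw_callback(hw, OCS_HW_CB_UNSOLICITED, my_unsol_cb, my_ocs);
 * @endcode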
2377 */
2378 ocs_hw_rtn_e
2379 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2380 {
2381
2382 if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2383 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2384 hw, which, func);
2385 return OCS_HW_RTN_ERROR;
2386 }
2387
2388 switch (which) {
2389 case OCS_HW_CB_DOMAIN:
2390 hw->callback.domain = func;
2391 hw->args.domain = arg;
2392 break;
2393 case OCS_HW_CB_PORT:
2394 hw->callback.port = func;
2395 hw->args.port = arg;
2396 break;
2397 case OCS_HW_CB_UNSOLICITED:
2398 hw->callback.unsolicited = func;
2399 hw->args.unsolicited = arg;
2400 break;
2401 case OCS_HW_CB_REMOTE_NODE:
2402 hw->callback.rnode = func;
2403 hw->args.rnode = arg;
2404 break;
2405 case OCS_HW_CB_BOUNCE:
2406 hw->callback.bounce = func;
2407 hw->args.bounce = arg;
2408 break;
2409 default:
2410 ocs_log_test(hw->os, "unknown callback %#x\n", which);
2411 return OCS_HW_RTN_ERROR;
2412 }
2413
2414 return OCS_HW_RTN_SUCCESS;
2415 }
2416
2417 /**
2418 * @ingroup port
2419 * @brief Allocate a port object.
2420 *
2421 * @par Description
2422 * This function allocates a VPI object for the port and stores it in the
2423 * indicator field of the port object.
2424 *
2425 * @param hw Hardware context.
2426 * @param sport SLI port object used to connect to the domain.
2427 * @param domain Domain object associated with this port (may be NULL).
2428 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2429 *
2430 * @return Returns 0 on success, or a non-zero value on failure.
2431 */
2432 ocs_hw_rtn_e
2433 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2434 uint8_t *wwpn)
2435 {
2436 uint8_t *cmd = NULL;
2437 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2438 uint32_t index;
2439
2440 sport->indicator = UINT32_MAX;
2441 sport->hw = hw;
2442 sport->ctx.app = sport;
2443 sport->sm_free_req_pending = 0;
2444
2445 /*
2446 * Check if the chip is in an error state (UE'd) before proceeding.
2447 */
2448 if (sli_fw_error_status(&hw->sli) > 0) {
2449 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2450 return OCS_HW_RTN_ERROR;
2451 }
2452
2453 if (wwpn) {
2454 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2455 }
2456
2457 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2458 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2459 return OCS_HW_RTN_ERROR;
2460 }
2461
2462 if (domain != NULL) {
2463 ocs_sm_function_t next = NULL;
2464
2465 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2466 if (!cmd) {
2467 ocs_log_err(hw->os, "command memory allocation failed\n");
2468 rc = OCS_HW_RTN_NO_MEMORY;
2469 goto ocs_hw_port_alloc_out;
2470 }
2471
2472 /* If the WWPN is NULL, fetch the default WWPN and WWNN before
2473 * initializing the VPI
2474 */
2475 if (!wwpn) {
2476 next = __ocs_hw_port_alloc_read_sparm64;
2477 } else {
2478 next = __ocs_hw_port_alloc_init_vpi;
2479 }
2480
2481 ocs_sm_transition(&sport->ctx, next, cmd);
2482 } else if (!wwpn) {
2483 /* This is the convention for the HW, not SLI */
2484 ocs_log_test(hw->os, "need WWN for physical port\n");
2485 rc = OCS_HW_RTN_ERROR;
2486 } else {
2487 /* domain NULL and wwpn non-NULL */
2488 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2489 }
2490
2491 ocs_hw_port_alloc_out:
2492 if (rc != OCS_HW_RTN_SUCCESS) {
2493 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2494
2495 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2496 }
2497
2498 return rc;
2499 }
2500
2501 /**
2502 * @ingroup port
2503 * @brief Attach a physical/virtual SLI port to a domain.
2504 *
2505 * @par Description
2506 * This function registers a previously-allocated VPI with the
2507 * device.
2508 *
2509 * @param hw Hardware context.
2510 * @param sport Pointer to the SLI port object.
2511 * @param fc_id Fibre Channel ID to associate with this port.
2512 *
2513 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2514 */
2515 ocs_hw_rtn_e
2516 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2517 {
2518 uint8_t *buf = NULL;
2519 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2520
2521 if (!hw || !sport) {
2522 ocs_log_err(hw ? hw->os : NULL,
2523 "bad parameter(s) hw=%p sport=%p\n", hw,
2524 sport);
2525 return OCS_HW_RTN_ERROR;
2526 }
2527
2528 /*
2529 * Check if the chip is in an error state (UE'd) before proceeding.
2530 */
2531 if (sli_fw_error_status(&hw->sli) > 0) {
2532 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2533 return OCS_HW_RTN_ERROR;
2534 }
2535
2536 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2537 if (!buf) {
2538 ocs_log_err(hw->os, "no buffer for command\n");
2539 return OCS_HW_RTN_NO_MEMORY;
2540 }
2541
2542 sport->fc_id = fc_id;
2543 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2544 return rc;
2545 }
2546
2547 /**
2548 * @brief Called when the port control command completes.
2549 *
2550 * @par Description
2551 * We only need to free the mailbox command buffer.
2552 *
2553 * @param hw Hardware context.
2554 * @param status Status field from the mbox completion.
2555 * @param mqe Mailbox response structure.
2556 * @param arg Pointer to a callback function that signals the caller that the command is done.
2557 *
2558 * @return Returns 0.
2559 */
2560 static int32_t
2561 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2562 {
2563 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2564 return 0;
2565 }
2566
2567 /**
2568 * @ingroup port
2569 * @brief Control a port (initialize, shutdown, or set link configuration).
2570 *
2571 * @par Description
2572 * This function controls a port depending on the @c ctrl parameter:
2573 * - @b OCS_HW_PORT_INIT -
2574 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2575 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2576 * .
2577 * - @b OCS_HW_PORT_SHUTDOWN -
2578 * Issues the DOWN_LINK command for the specified port.
2579 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2580 * .
2581 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2582 * Sets the link configuration.
2583 *
2584 * @param hw Hardware context.
2585 * @param ctrl Specifies the operation:
2586 * - OCS_HW_PORT_INIT
2587 * - OCS_HW_PORT_SHUTDOWN
2588 * - OCS_HW_PORT_SET_LINK_CONFIG
2589 *
2590 * @param value Operation-specific value.
2591 * - OCS_HW_PORT_INIT - Selective reset AL_PA
2592 * - OCS_HW_PORT_SHUTDOWN - N/A
2593 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2594 *
 * @param cb Callback function, depending on the operation:
2596 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2597 * are handled by the OCS_HW_CB_DOMAIN callbacks).
2598 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2599 * completes.
2600 *
 * @param arg Argument passed to the callback when the command completes.
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - Passed to the linkcfg completion
 * callback.
2606 *
2607 * @return Returns 0 on success, or a non-zero value on failure.
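 *
 * @par Example
 * Bringing the link up (sketch; a @c value of zero requests no selective
 * reset AL_PA):
 * @code
 * if (ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL) != OCS_HW_RTN_SUCCESS) {
 *	ocs_log_err(hw->os, "port init failed\n");
 * }
 * @endcode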
2608 */
2609 ocs_hw_rtn_e
2610 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2611 {
2612 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2613
2614 switch (ctrl) {
2615 case OCS_HW_PORT_INIT:
2616 {
2617 uint8_t *init_link;
2618 uint32_t speed = 0;
2619 uint8_t reset_alpa = 0;
2620
2621 if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2622 uint8_t *cfg_link;
2623
2624 cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2625 if (cfg_link == NULL) {
2626 ocs_log_err(hw->os, "no buffer for command\n");
2627 return OCS_HW_RTN_NO_MEMORY;
2628 }
2629
2630 if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2631 rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2632 ocs_hw_cb_port_control, NULL);
2633 }
2634
2635 if (rc != OCS_HW_RTN_SUCCESS) {
2636 ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2637 ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2638 break;
2639 }
2640 speed = hw->config.speed;
2641 reset_alpa = (uint8_t)(value & 0xff);
2642 } else {
2643 speed = FC_LINK_SPEED_10G;
2644 }
2645
2646 /*
		 * Bring the link up, unless the firmware version is too low
2648 */
2649 if (hw->workaround.fw_version_too_low) {
2650 if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2651 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2652 OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2653 } else {
2654 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2655 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2656 }
2657
2658 return OCS_HW_RTN_ERROR;
2659 }
2660
2661 rc = OCS_HW_RTN_ERROR;
2662
2663 /* Allocate a new buffer for the init_link command */
2664 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2665 if (init_link == NULL) {
2666 ocs_log_err(hw->os, "no buffer for command\n");
2667 return OCS_HW_RTN_NO_MEMORY;
2668 }
2669
2670 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2671 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2672 ocs_hw_cb_port_control, NULL);
2673 }
2674 /* Free buffer on error, since no callback is coming */
2675 if (rc != OCS_HW_RTN_SUCCESS) {
2676 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2677 ocs_log_err(hw->os, "INIT_LINK failed\n");
2678 }
2679 break;
2680 }
2681 case OCS_HW_PORT_SHUTDOWN:
2682 {
2683 uint8_t *down_link;
2684
2685 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2686 if (down_link == NULL) {
2687 ocs_log_err(hw->os, "no buffer for command\n");
2688 return OCS_HW_RTN_NO_MEMORY;
2689 }
2690 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2691 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2692 ocs_hw_cb_port_control, NULL);
2693 }
2694 /* Free buffer on error, since no callback is coming */
2695 if (rc != OCS_HW_RTN_SUCCESS) {
2696 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2697 ocs_log_err(hw->os, "DOWN_LINK failed\n");
2698 }
2699 break;
2700 }
2701 case OCS_HW_PORT_SET_LINK_CONFIG:
2702 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2703 break;
2704 default:
2705 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2706 break;
2707 }
2708
2709 return rc;
2710 }
2711
2712 /**
2713 * @ingroup port
2714 * @brief Free port resources.
2715 *
2716 * @par Description
2717 * Issue the UNREG_VPI command to free the assigned VPI context.
2718 *
2719 * @param hw Hardware context.
2720 * @param sport SLI port object used to connect to the domain.
2721 *
2722 * @return Returns 0 on success, or a non-zero value on failure.
2723 */
2724 ocs_hw_rtn_e
2725 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2726 {
2727 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2728
2729 if (!hw || !sport) {
2730 ocs_log_err(hw ? hw->os : NULL,
2731 "bad parameter(s) hw=%p sport=%p\n", hw,
2732 sport);
2733 return OCS_HW_RTN_ERROR;
2734 }
2735
2736 /*
2737 * Check if the chip is in an error state (UE'd) before proceeding.
2738 */
2739 if (sli_fw_error_status(&hw->sli) > 0) {
2740 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2741 return OCS_HW_RTN_ERROR;
2742 }
2743
2744 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2745 return rc;
2746 }
2747
2748 /**
2749 * @ingroup domain
2750 * @brief Allocate a fabric domain object.
2751 *
2752 * @par Description
2753 * This function starts a series of commands needed to connect to the domain, including
2754 * - REG_FCFI
2755 * - INIT_VFI
2756 * - READ_SPARMS
2757 * .
2758 * @b Note: Not all SLI interface types use all of the above commands.
2759 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2760 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2761 *
2762 * @param hw Hardware context.
2763 * @param domain Pointer to the domain object.
2764 * @param fcf FCF index.
2765 * @param vlan VLAN ID.
2766 *
2767 * @return Returns 0 on success, or a non-zero value on failure.
2768 */
2769 ocs_hw_rtn_e
2770 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2771 {
2772 uint8_t *cmd = NULL;
2773 uint32_t index;
2774
2775 if (!hw || !domain || !domain->sport) {
2776 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2777 hw, domain, domain ? domain->sport : NULL);
2778 return OCS_HW_RTN_ERROR;
2779 }
2780
2781 /*
2782 * Check if the chip is in an error state (UE'd) before proceeding.
2783 */
2784 if (sli_fw_error_status(&hw->sli) > 0) {
2785 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2786 return OCS_HW_RTN_ERROR;
2787 }
2788
2789 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2790 if (!cmd) {
2791 ocs_log_err(hw->os, "command memory allocation failed\n");
2792 return OCS_HW_RTN_NO_MEMORY;
2793 }
2794
2795 domain->dma = hw->domain_dmem;
2796
2797 domain->hw = hw;
2798 domain->sm.app = domain;
2799 domain->fcf = fcf;
2800 domain->fcf_indicator = UINT32_MAX;
2801 domain->vlan_id = vlan;
2802 domain->indicator = UINT32_MAX;
2803
2804 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2805 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2806
2807 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2808
2809 return OCS_HW_RTN_ERROR;
2810 }
2811
2812 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2813 return OCS_HW_RTN_SUCCESS;
2814 }
2815
2816 /**
2817 * @ingroup domain
2818 * @brief Attach a SLI port to a domain.
2819 *
2820 * @param hw Hardware context.
2821 * @param domain Pointer to the domain object.
2822 * @param fc_id Fibre Channel ID to associate with this port.
2823 *
2824 * @return Returns 0 on success, or a non-zero value on failure.
2825 */
2826 ocs_hw_rtn_e
2827 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2828 {
2829 uint8_t *buf = NULL;
2830 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2831
2832 if (!hw || !domain) {
2833 ocs_log_err(hw ? hw->os : NULL,
2834 "bad parameter(s) hw=%p domain=%p\n",
2835 hw, domain);
2836 return OCS_HW_RTN_ERROR;
2837 }
2838
2839 /*
2840 * Check if the chip is in an error state (UE'd) before proceeding.
2841 */
2842 if (sli_fw_error_status(&hw->sli) > 0) {
2843 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2844 return OCS_HW_RTN_ERROR;
2845 }
2846
2847 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2848 if (!buf) {
2849 ocs_log_err(hw->os, "no buffer for command\n");
2850 return OCS_HW_RTN_NO_MEMORY;
2851 }
2852
2853 domain->sport->fc_id = fc_id;
2854 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2855 return rc;
2856 }
2857
2858 /**
2859 * @ingroup domain
2860 * @brief Free a fabric domain object.
2861 *
2862 * @par Description
2863 * Free both the driver and SLI port resources associated with the domain.
2864 *
2865 * @param hw Hardware context.
2866 * @param domain Pointer to the domain object.
2867 *
2868 * @return Returns 0 on success, or a non-zero value on failure.
2869 */
2870 ocs_hw_rtn_e
2871 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2872 {
2873 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2874
2875 if (!hw || !domain) {
2876 ocs_log_err(hw ? hw->os : NULL,
2877 "bad parameter(s) hw=%p domain=%p\n",
2878 hw, domain);
2879 return OCS_HW_RTN_ERROR;
2880 }
2881
2882 /*
2883 * Check if the chip is in an error state (UE'd) before proceeding.
2884 */
2885 if (sli_fw_error_status(&hw->sli) > 0) {
2886 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2887 return OCS_HW_RTN_ERROR;
2888 }
2889
2890 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2891 return rc;
2892 }
2893
2894 /**
2895 * @ingroup domain
2896 * @brief Free a fabric domain object.
2897 *
2898 * @par Description
2899 * Free the driver resources associated with the domain. The difference between
2900 * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2901 * exist on the SLI port, due to a reset or after some error conditions.
2902 *
2903 * @param hw Hardware context.
2904 * @param domain Pointer to the domain object.
2905 *
2906 * @return Returns 0 on success, or a non-zero value on failure.
2907 */
2908 ocs_hw_rtn_e
2909 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2910 {
2911 if (!hw || !domain) {
2912 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2913 return OCS_HW_RTN_ERROR;
2914 }
2915
2916 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2917
2918 return OCS_HW_RTN_SUCCESS;
2919 }
2920
2921 /**
2922 * @ingroup node
2923 * @brief Allocate a remote node object.
2924 *
2925 * @param hw Hardware context.
2926 * @param rnode Allocated remote node object to initialize.
2927 * @param fc_addr FC address of the remote node.
2928 * @param sport SLI port used to connect to remote node.
2929 *
2930 * @return Returns 0 on success, or a non-zero value on failure.
2931 */
2932 ocs_hw_rtn_e
2933 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2934 ocs_sli_port_t *sport)
2935 {
	/* Fail if an RPI has already been allocated for this node */
2937 if (UINT32_MAX != rnode->indicator) {
2938 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2939 fc_addr, rnode->indicator);
2940 return OCS_HW_RTN_ERROR;
2941 }
2942
2943 /*
2944 * Check if the chip is in an error state (UE'd) before proceeding.
2945 */
2946 if (sli_fw_error_status(&hw->sli) > 0) {
2947 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2948 return OCS_HW_RTN_ERROR;
2949 }
2950
2951 /* NULL SLI port indicates an unallocated remote node */
2952 rnode->sport = NULL;
2953
2954 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2955 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2956 fc_addr);
2957 return OCS_HW_RTN_ERROR;
2958 }
2959
2960 rnode->fc_id = fc_addr;
2961 rnode->sport = sport;
2962
2963 return OCS_HW_RTN_SUCCESS;
2964 }
2965
2966 /**
2967 * @ingroup node
2968 * @brief Update a remote node object with the remote port's service parameters.
2969 *
2970 * @param hw Hardware context.
2971 * @param rnode Allocated remote node object to initialize.
2972 * @param sparms DMA buffer containing the remote port's service parameters.
2973 *
2974 * @return Returns 0 on success, or a non-zero value on failure.
2975 */
2976 ocs_hw_rtn_e
2977 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2978 {
2979 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2980 uint8_t *buf = NULL;
2981 uint32_t count = 0;
2982
2983 if (!hw || !rnode || !sparms) {
2984 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
2985 hw, rnode, sparms);
2986 return OCS_HW_RTN_ERROR;
2987 }
2988
2989 /*
2990 * Check if the chip is in an error state (UE'd) before proceeding.
2991 */
2992 if (sli_fw_error_status(&hw->sli) > 0) {
2993 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2994 return OCS_HW_RTN_ERROR;
2995 }
2996
2997 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2998 if (!buf) {
2999 ocs_log_err(hw->os, "no buffer for command\n");
3000 return OCS_HW_RTN_NO_MEMORY;
3001 }
3002
3003 /*
3004 * If the attach count is non-zero, this RPI has already been registered.
3005 * Otherwise, register the RPI
3006 */
3007 if (rnode->index == UINT32_MAX) {
3008 ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3009 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3010 return OCS_HW_RTN_ERROR;
3011 }
3012 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3013 if (count) {
3014 /*
		 * Can't attach multiple FC_IDs to a node unless High Login
3016 * Mode is enabled
3017 */
3018 if (sli_get_hlm(&hw->sli) == FALSE) {
3019 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3020 sli_get_hlm(&hw->sli), count);
3021 rc = OCS_HW_RTN_SUCCESS;
3022 } else {
3023 rnode->node_group = TRUE;
3024 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3025 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3026 }
3027 } else {
3028 rnode->node_group = FALSE;
3029
3030 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3031 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3032 rnode->indicator, rnode->sport->indicator,
3033 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3034 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3035 ocs_hw_cb_node_attach, rnode);
3036 }
3037 }
3038
3039 if (count || rc) {
3040 if (rc < OCS_HW_RTN_SUCCESS) {
3041 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3042 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3043 }
3044 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3045 }
3046
3047 return rc;
3048 }
3049
3050 /**
3051 * @ingroup node
3052 * @brief Free a remote node resource.
3053 *
3054 * @param hw Hardware context.
3055 * @param rnode Remote node object to free.
3056 *
3057 * @return Returns 0 on success, or a non-zero value on failure.
3058 */
3059 ocs_hw_rtn_e
3060 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3061 {
3062 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3063
3064 if (!hw || !rnode) {
3065 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3066 hw, rnode);
3067 return OCS_HW_RTN_ERROR;
3068 }
3069
3070 if (rnode->sport) {
3071 if (!rnode->attached) {
3072 if (rnode->indicator != UINT32_MAX) {
3073 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3074 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3075 rnode->indicator, rnode->fc_id);
3076 rc = OCS_HW_RTN_ERROR;
3077 } else {
3078 rnode->node_group = FALSE;
3079 rnode->indicator = UINT32_MAX;
3080 rnode->index = UINT32_MAX;
3081 rnode->free_group = FALSE;
3082 }
3083 }
3084 } else {
3085 ocs_log_err(hw->os, "Error: rnode is still attached\n");
3086 rc = OCS_HW_RTN_ERROR;
3087 }
3088 }
3089
3090 return rc;
3091 }
3092
3093 /**
3094 * @ingroup node
3095 * @brief Free a remote node object.
3096 *
3097 * @param hw Hardware context.
3098 * @param rnode Remote node object to free.
3099 *
3100 * @return Returns 0 on success, or a non-zero value on failure.
3101 */
3102 ocs_hw_rtn_e
3103 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3104 {
3105 uint8_t *buf = NULL;
3106 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC;
3107 uint32_t index = UINT32_MAX;
3108
3109 if (!hw || !rnode) {
3110 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3111 hw, rnode);
3112 return OCS_HW_RTN_ERROR;
3113 }
3114
3115 /*
3116 * Check if the chip is in an error state (UE'd) before proceeding.
3117 */
3118 if (sli_fw_error_status(&hw->sli) > 0) {
3119 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3120 return OCS_HW_RTN_ERROR;
3121 }
3122
3123 index = rnode->index;
3124
3125 if (rnode->sport) {
3126 uint32_t count = 0;
3127 uint32_t fc_id;
3128
3129 if (!rnode->attached) {
3130 return OCS_HW_RTN_SUCCESS_SYNC;
3131 }
3132
3133 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3134 if (!buf) {
3135 ocs_log_err(hw->os, "no buffer for command\n");
3136 return OCS_HW_RTN_NO_MEMORY;
3137 }
3138
3139 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3140
3141 if (count <= 1) {
3142 /* There are no other references to this RPI
3143 * so unregister it and free the resource. */
3144 fc_id = UINT32_MAX;
3145 rnode->node_group = FALSE;
3146 rnode->free_group = TRUE;
3147 } else {
3148 if (sli_get_hlm(&hw->sli) == FALSE) {
3149 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3150 count);
3151 }
3152 fc_id = rnode->fc_id & 0x00ffffff;
3153 }
3154
3155 rc = OCS_HW_RTN_ERROR;
3156
3157 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3158 SLI_RSRC_FCOE_RPI, fc_id)) {
3159 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3160 }
3161
3162 if (rc != OCS_HW_RTN_SUCCESS) {
3163 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3164 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3165 rc = OCS_HW_RTN_ERROR;
3166 }
3167 }
3168
3169 return rc;
3170 }
3171
3172 /**
3173 * @ingroup node
3174 * @brief Free all remote node objects.
3175 *
3176 * @param hw Hardware context.
3177 *
3178 * @return Returns 0 on success, or a non-zero value on failure.
3179 */
3180 ocs_hw_rtn_e
3181 ocs_hw_node_free_all(ocs_hw_t *hw)
3182 {
3183 uint8_t *buf = NULL;
3184 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
3185
3186 if (!hw) {
3187 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3188 return OCS_HW_RTN_ERROR;
3189 }
3190
3191 /*
3192 * Check if the chip is in an error state (UE'd) before proceeding.
3193 */
3194 if (sli_fw_error_status(&hw->sli) > 0) {
3195 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3196 return OCS_HW_RTN_ERROR;
3197 }
3198
3199 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3200 if (!buf) {
3201 ocs_log_err(hw->os, "no buffer for command\n");
3202 return OCS_HW_RTN_NO_MEMORY;
3203 }
3204
3205 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3206 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3207 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3208 NULL);
3209 }
3210
3211 if (rc != OCS_HW_RTN_SUCCESS) {
3212 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3213 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3214 rc = OCS_HW_RTN_ERROR;
3215 }
3216
3217 return rc;
3218 }
3219
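/**
 * @ingroup node
 * @brief Allocate a remote node group object.
 *
 * @par Description
 * Reserves an RPI that the group's member nodes will share.
 *
 * @param hw Hardware context.
 * @param ngroup Remote node group object to initialize.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */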
3220 ocs_hw_rtn_e
3221 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3222 {
3223
3224 if (!hw || !ngroup) {
3225 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3226 hw, ngroup);
3227 return OCS_HW_RTN_ERROR;
3228 }
3229
3230 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3231 &ngroup->index)) {
3232 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3233 ngroup->indicator);
3234 return OCS_HW_RTN_ERROR;
3235 }
3236
3237 return OCS_HW_RTN_SUCCESS;
3238 }
3239
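/**
 * @ingroup node
 * @brief Attach a remote node to a node group.
 *
 * @par Description
 * Frees the node's individual RPI and points the node at the group's
 * shared RPI indicator and index. The node must not already be attached.
 *
 * @param hw Hardware context.
 * @param ngroup Remote node group object.
 * @param rnode Remote node object to add to the group.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */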
3240 ocs_hw_rtn_e
3241 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3242 {
3243
3244 if (!hw || !ngroup || !rnode) {
3245 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3246 hw, ngroup, rnode);
3247 return OCS_HW_RTN_ERROR;
3248 }
3249
3250 if (rnode->attached) {
3251 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3252 rnode->indicator, rnode->fc_id);
3253 return OCS_HW_RTN_ERROR;
3254 }
3255
3256 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3257 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3258 rnode->indicator);
3259 return OCS_HW_RTN_ERROR;
3260 }
3261
3262 rnode->indicator = ngroup->indicator;
3263 rnode->index = ngroup->index;
3264
3265 return OCS_HW_RTN_SUCCESS;
3266 }
3267
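/**
 * @ingroup node
 * @brief Free a remote node group object.
 *
 * @param hw Hardware context.
 * @param ngroup Remote node group object to free.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */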
3268 ocs_hw_rtn_e
3269 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3270 {
3271 int ref;
3272
3273 if (!hw || !ngroup) {
3274 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3275 hw, ngroup);
3276 return OCS_HW_RTN_ERROR;
3277 }
3278
3279 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3280 if (ref) {
3281 /* Hmmm, the reference count is non-zero */
3282 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3283 ref, ngroup->indicator);
3284
3285 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3286 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3287 ngroup->indicator);
3288 return OCS_HW_RTN_ERROR;
3289 }
3290
3291 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3292 }
3293
3294 ngroup->indicator = UINT32_MAX;
3295 ngroup->index = UINT32_MAX;
3296
3297 return OCS_HW_RTN_SUCCESS;
3298 }
3299
3300 /**
3301 * @brief Initialize IO fields on each free call.
3302 *
3303 * @n @b Note: This is done on each free call (as opposed to each
3304 * alloc call) because port-owned XRIs are not
3305 * allocated with ocs_hw_io_alloc() but are freed with this
3306 * function.
3307 *
3308 * @param io Pointer to HW IO.
3309 */
3310 static inline void
3311 ocs_hw_init_free_io(ocs_hw_io_t *io)
3312 {
3313 /*
3314 * Set io->done to NULL, to avoid any callbacks, should
3315 * a completion be received for one of these IOs
3316 */
3317 io->done = NULL;
3318 io->abort_done = NULL;
3319 io->status_saved = 0;
3320 io->abort_in_progress = FALSE;
3321 io->port_owned_abort_count = 0;
3322 io->rnode = NULL;
3323 io->type = 0xFFFF;
3324 io->wq = NULL;
3325 io->ul_io = NULL;
3326 io->tgt_wqe_timeout = 0;
3327 }
3328
3329 /**
3330 * @ingroup io
3331 * @brief Lockless allocate a HW IO object.
3332 *
3333 * @par Description
3334 * Assume that hw->ocs_lock is held. This function is only used if
3335 * use_dif_sec_xri workaround is being used.
3336 *
3337 * @param hw Hardware context.
3338 *
3339 * @return Returns a pointer to an object on success, or NULL on failure.
3340 */
3341 static inline ocs_hw_io_t *
3342 _ocs_hw_io_alloc(ocs_hw_t *hw)
3343 {
3344 ocs_hw_io_t *io = NULL;
3345
3346 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3347 ocs_list_add_tail(&hw->io_inuse, io);
3348 io->state = OCS_HW_IO_STATE_INUSE;
3349 io->quarantine = FALSE;
3350 io->quarantine_first_phase = TRUE;
3351 io->abort_reqtag = UINT32_MAX;
3352 ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3353 } else {
3354 ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3355 }
3356
3357 return io;
}

/**
3360 * @ingroup io
3361 * @brief Allocate a HW IO object.
3362 *
3363 * @par Description
3364 * @n @b Note: This function applies to non-port owned XRIs
3365 * only.
3366 *
3367 * @param hw Hardware context.
3368 *
3369 * @return Returns a pointer to an object on success, or NULL on failure.
3370 */
3371 ocs_hw_io_t *
3372 ocs_hw_io_alloc(ocs_hw_t *hw)
3373 {
3374 ocs_hw_io_t *io = NULL;
3375
3376 ocs_lock(&hw->io_lock);
3377 io = _ocs_hw_io_alloc(hw);
3378 ocs_unlock(&hw->io_lock);
3379
3380 return io;
3381 }
3382
3383 /**
3384 * @ingroup io
3385 * @brief Allocate/Activate a port owned HW IO object.
3386 *
3387 * @par Description
3388 * This function is called by the transport layer when an XRI is
3389 * allocated by the SLI-Port. This will "activate" the HW IO
3390 * associated with the XRI received from the SLI-Port to mirror
3391 * the state of the XRI.
3392 * @n @n @b Note: This function applies to port owned XRIs only.
3393 *
3394 * @param hw Hardware context.
3395 * @param io Pointer HW IO to activate/allocate.
3396 *
3397 * @return Returns a pointer to an object on success, or NULL on failure.
3398 */
3399 ocs_hw_io_t *
3400 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3401 {
3402 if (ocs_ref_read_count(&io->ref) > 0) {
3403 ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3404 return NULL;
3405 }
3406
3407 if (io->wq != NULL) {
3408 ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3409 return NULL;
3410 }
3411
3412 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3413 io->xbusy = TRUE;
3414
3415 return io;
3416 }
3417
3418 /**
3419 * @ingroup io
 * @brief When an IO is freed, move it to the correct list based on the
 * exchange busy flag and any active workarounds.
3422 *
3423 * @par Description
3424 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3425 * from the busy or wait_free list.
3426 *
3427 * @param hw Hardware context.
3428 * @param io Pointer to the IO object to move.
3429 */
3430 static void
3431 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3432 {
3433 if (io->xbusy) {
3434 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3435 ocs_list_add_tail(&hw->io_wait_free, io);
3436 io->state = OCS_HW_IO_STATE_WAIT_FREE;
3437 } else {
3438 /* IO not busy, add to free list */
3439 ocs_list_add_tail(&hw->io_free, io);
3440 io->state = OCS_HW_IO_STATE_FREE;
3441 }
3442
3443 /* BZ 161832 workaround */
3444 if (hw->workaround.use_dif_sec_xri) {
3445 ocs_hw_check_sec_hio_list(hw);
3446 }
3447 }
3448
3449 /**
3450 * @ingroup io
3451 * @brief Free a HW IO object. Perform cleanup common to
3452 * port and host-owned IOs.
3453 *
3454 * @param hw Hardware context.
3455 * @param io Pointer to the HW IO object.
3456 */
3457 static inline void
3458 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3459 {
3460 /* initialize IO fields */
3461 ocs_hw_init_free_io(io);
3462
3463 /* Restore default SGL */
3464 ocs_hw_io_restore_sgl(hw, io);
3465 }
3466
3467 /**
3468 * @ingroup io
3469 * @brief Free a HW IO object associated with a port-owned XRI.
3470 *
3471 * @param arg Pointer to the HW IO object.
3472 */
3473 static void
3474 ocs_hw_io_free_port_owned(void *arg)
3475 {
3476 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3477 ocs_hw_t *hw = io->hw;
3478
3479 /*
3480 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3481 * waiting for buffers.
3482 */
3483 if (io->auto_xfer_rdy_dnrx) {
3484 ocs_lock(&hw->io_lock);
3485 /* take a reference count because we still own the IO until the buffer is posted */
3486 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3487 ocs_list_add_tail(&hw->io_port_dnrx, io);
3488 ocs_unlock(&hw->io_lock);
3489 }
3490
3491 /* perform common cleanup */
3492 ocs_hw_io_free_common(hw, io);
3493 }
3494
3495 /**
3496 * @ingroup io
3497 * @brief Free a previously-allocated HW IO object. Called when
3498 * IO refcount goes to zero (host-owned IOs only).
3499 *
3500 * @param arg Pointer to the HW IO object.
3501 */
3502 static void
3503 ocs_hw_io_free_internal(void *arg)
3504 {
3505 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3506 ocs_hw_t *hw = io->hw;
3507
3508 /* perform common cleanup */
3509 ocs_hw_io_free_common(hw, io);
3510
3511 ocs_lock(&hw->io_lock);
3512 /* remove from in-use list */
3513 ocs_list_remove(&hw->io_inuse, io);
3514 ocs_hw_io_free_move_correct_list(hw, io);
3515 ocs_unlock(&hw->io_lock);
3516 }
3517
3518 /**
3519 * @ingroup io
3520 * @brief Free a previously-allocated HW IO object.
3521 *
3522 * @par Description
3523 * @n @b Note: This function applies to port and host owned XRIs.
3524 *
3525 * @param hw Hardware context.
3526 * @param io Pointer to the HW IO object.
3527 *
3528 * @return Returns a non-zero value if HW IO was freed, 0 if references
3529 * on the IO still exist, or a negative value if an error occurred.
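 *
 * @par Example
 * Typical pairing with ocs_hw_io_alloc() (sketch; WQE construction and
 * error handling elided):
 * @code
 * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 *
 * if (io != NULL) {
 *	// ... build and submit work referencing io ...
 *	ocs_hw_io_free(hw, io);	// drops the allocation reference
 * }
 * @endcode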
3530 */
3531 int32_t
3532 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3533 {
	/* just put the reference; the IO is freed when the refcount reaches zero */
3535 if (ocs_ref_read_count(&io->ref) <= 0) {
3536 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3537 io->indicator, io->reqtag);
3538 return -1;
3539 }
3540
3541 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3542 }
3543
3544 /**
3545 * @ingroup io
3546 * @brief Check if given HW IO is in-use
3547 *
3548 * @par Description
3549 * This function returns TRUE if the given HW IO has been
3550 * allocated and is in-use, and FALSE otherwise. It applies to
3551 * port and host owned XRIs.
3552 *
3553 * @param hw Hardware context.
3554 * @param io Pointer to the HW IO object.
3555 *
3556 * @return TRUE if an IO is in use, or FALSE otherwise.
3557 */
3558 uint8_t
3559 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3560 {
3561 return (ocs_ref_read_count(&io->ref) > 0);
3562 }
3563
3564 /**
3565 * @brief Write a HW IO to a work queue.
3566 *
3567 * @par Description
 * Writes the WQE directly to the underlying SLI-4 queue, periodically
 * setting the WQEC bit so the hardware generates consumed-entry
 * completions that replenish the WQ free count.
3569 *
3570 * @param wq Pointer to work queue.
3571 * @param wqe Pointer to WQ entry.
3572 *
3573 * @n @b Note: Assumes the SLI-4 queue lock is held.
3574 *
3575 * @return Returns 0 on success, or a negative error code value on failure.
3576 */
3577 static int32_t
3578 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3579 {
3580 int32_t rc;
3581 int32_t queue_rc;
3582
	/* Every so often, set the wqec bit to generate consumed completions */
3584 if (wq->wqec_count) {
3585 wq->wqec_count--;
3586 }
3587 if (wq->wqec_count == 0) {
3588 sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3589 genwqe->wqec = 1;
3590 wq->wqec_count = wq->wqec_set_count;
3591 }
3592
3593 /* Decrement WQ free count */
3594 wq->free_count--;
3595
3596 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3597
3598 if (queue_rc < 0) {
3599 rc = -1;
3600 } else {
3601 rc = 0;
3602 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3603 }
3604
3605 return rc;
3606 }
3607
3608 /**
3609 * @brief Write a HW IO to a work queue.
3610 *
3611 * @par Description
 * Writes the WQE to the work queue if credits are available; otherwise it
 * is appended to the pending list. If entries are already pending, the new
 * WQE is queued behind them and the pending list is drained in FIFO order
 * to preserve submission ordering.
 *
 * @param wq Pointer to work queue.
 * @param wqe Pointer to the WQE.
3616 *
3617 * @n @b Note: Takes the SLI-4 queue lock.
3618 *
3619 * @return Returns 0 on success, or a negative error code value on failure.
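 *
 * @par Example
 * Submission sketch; the WQE is assumed to have been formatted beforehand
 * by one of the sli_*_wqe() helpers:
 * @code
 * if (hw_wq_write(wq, &io->wqe) < 0) {
 *	ocs_log_err(wq->hw->os, "WQE submission failed\n");
 * }
 * @endcode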
3620 */
3621 int32_t
3622 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3623 {
3624 int32_t rc = 0;
3625
3626 sli_queue_lock(wq->queue);
	if (!ocs_list_empty(&wq->pending_list)) {
3628 ocs_list_add_tail(&wq->pending_list, wqe);
3629 OCS_STAT(wq->wq_pending_count++;)
3630 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3631 rc = _hw_wq_write(wq, wqe);
3632 if (rc < 0) {
3633 break;
3634 }
3635 if (wqe->abort_wqe_submit_needed) {
3636 wqe->abort_wqe_submit_needed = 0;
3637 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3638 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3639 ocs_list_add_tail(&wq->pending_list, wqe);
3640 OCS_STAT(wq->wq_pending_count++;)
3641 }
3642 }
3643 } else {
3644 if (wq->free_count > 0) {
3645 rc = _hw_wq_write(wq, wqe);
3646 } else {
3647 ocs_list_add_tail(&wq->pending_list, wqe);
3648 OCS_STAT(wq->wq_pending_count++;)
3649 }
3650 }
3651
3652 sli_queue_unlock(wq->queue);
3653
	return rc;
}
3657
3658 /**
3659 * @brief Update free count and submit any pending HW IOs
3660 *
3661 * @par Description
* The WQ free count is updated, and any pending HW IOs that will fit in
* the queue are submitted.
*
* @param wq Pointer to work queue.
* @param update_free_count Value added to the WQ's free count.
3667 *
3668 * @return None.
3669 */
3670 static void
3671 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3672 {
3673 ocs_hw_wqe_t *wqe;
3674
3675 sli_queue_lock(wq->queue);
3676
3677 /* Update free count with value passed in */
3678 wq->free_count += update_free_count;
3679
3680 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3681 _hw_wq_write(wq, wqe);
3682
3683 if (wqe->abort_wqe_submit_needed) {
3684 wqe->abort_wqe_submit_needed = 0;
3685 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3686 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3687 ocs_list_add_tail(&wq->pending_list, wqe);
3688 OCS_STAT(wq->wq_pending_count++;)
3689 }
3690 }
3691
3692 sli_queue_unlock(wq->queue);
3693 }
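
/*
* For reference: the WQ credit flow implemented above. _hw_wq_write()
* debits free_count for each WQE posted and periodically sets the WQEC
* bit; when the hardware reports consumed entries, hw_wq_submit_pending()
* is called with that count to credit free_count and drain pending_list.
*/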
3694
3695 /**
* @brief Check for IOs waiting on the BZ 161832 workaround.
*
* @par Description
* Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary HW IO,
* then try to allocate one and dispatch the IO.
*
* @n @b Note: hw->io_lock MUST be held when called.
*
* @param hw Pointer to HW object.
*
* @return None.
3707 */
3708 static void
3709 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3710 {
3711 ocs_hw_io_t *io;
3712 ocs_hw_io_t *sec_io;
3713 int rc = 0;
3714
3715 while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3716 uint16_t flags;
3717
3718 sec_io = _ocs_hw_io_alloc(hw);
3719 if (sec_io == NULL) {
3720 break;
3721 }
3722
3723 io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3724 ocs_list_add_tail(&hw->io_inuse, io);
3725 io->state = OCS_HW_IO_STATE_INUSE;
3726 io->sec_hio = sec_io;
3727
/* mark the secondary XRI as quarantined for the second and subsequent data phases */
3729 if (io->xbusy) {
3730 sec_io->quarantine = TRUE;
3731 }
3732
3733 flags = io->sec_iparam.fcp_tgt.flags;
3734 if (io->xbusy) {
3735 flags |= SLI4_IO_CONTINUATION;
3736 } else {
3737 flags &= ~SLI4_IO_CONTINUATION;
3738 }
3739
3740 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3741
3742 /* Complete (continue) TRECV IO */
3743 if (io->xbusy) {
3744 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3745 io->first_data_sge,
3746 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3747 io->reqtag, SLI4_CQ_DEFAULT,
3748 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3749 flags,
3750 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3751 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3752 break;
3753 }
3754 } else {
3755 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3756 io->first_data_sge,
3757 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3758 io->reqtag, SLI4_CQ_DEFAULT,
3759 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3760 flags,
3761 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3762 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3763 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3764 break;
3765 }
3766 }
3767
3768 if (io->wq == NULL) {
3769 io->wq = ocs_hw_queue_next_wq(hw, io);
3770 ocs_hw_assert(io->wq != NULL);
3771 }
3772 io->xbusy = TRUE;
3773
3774 /*
3775 * Add IO to active io wqe list before submitting, in case the
3776 * wcqe processing preempts this thread.
3777 */
3778 ocs_hw_add_io_timed_wqe(hw, io);
3779 rc = hw_wq_write(io->wq, &io->wqe);
3780 if (rc >= 0) {
3781 /* non-negative return is success */
3782 rc = 0;
3783 } else {
3784 /* failed to write wqe, remove from active wqe list */
3785 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3786 io->xbusy = FALSE;
3787 ocs_hw_remove_io_timed_wqe(hw, io);
3788 }
3789 }
3790 }
3791
3792 /**
3793 * @ingroup io
3794 * @brief Send a Single Request/Response Sequence (SRRS).
3795 *
3796 * @par Description
3797 * This routine supports communication sequences consisting of a single
3798 * request and single response between two endpoints. Examples include:
3799 * - Sending an ELS request.
* - Sending an ELS response - To send an ELS response, the caller must provide
3801 * the OX_ID from the received request.
3802 * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3803 * the caller must provide the R_CTL, TYPE, and DF_CTL
3804 * values to place in the FC frame header.
3805 * .
3806 * @n @b Note: The caller is expected to provide both send and receive
3807 * buffers for requests. In the case of sending a response, no receive buffer
3808 * is necessary and the caller may pass in a NULL pointer.
3809 *
3810 * @param hw Hardware context.
3811 * @param type Type of sequence (ELS request/response, FC-CT).
3812 * @param io Previously-allocated HW IO object.
3813 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3814 * @param len Length, in bytes, of data to send.
3815 * @param receive Optional DMA memory to hold a response.
3816 * @param rnode Destination of data (that is, a remote node).
3817 * @param iparam IO parameters (ELS response and FC-CT).
3818 * @param cb Function call upon completion of sending the data (may be NULL).
3819 * @param arg Argument to pass to IO completion function.
3820 *
* @return Returns 0 on success, or a non-zero value on failure.
3822 */
3823 ocs_hw_rtn_e
3824 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3825 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3826 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3827 ocs_hw_srrs_cb_t cb, void *arg)
3828 {
3829 sli4_sge_t *sge = NULL;
3830 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3831 uint16_t local_flags = 0;
3832
3833 if (!hw || !io || !rnode || !iparam) {
3834 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3835 hw, io, send, receive, rnode, iparam);
3836 return OCS_HW_RTN_ERROR;
3837 }
3838
3839 if (hw->state != OCS_HW_STATE_ACTIVE) {
3840 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3841 return OCS_HW_RTN_ERROR;
3842 }
3843
3844 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3845 /* We must set the XC bit for port owned XRIs */
3846 local_flags |= SLI4_IO_CONTINUATION;
3847 }
3848 io->rnode = rnode;
3849 io->type = type;
3850 io->done = cb;
3851 io->arg = arg;
3852
3853 sge = io->sgl->virt;
3854
3855 /* clear both SGE */
3856 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3857
3858 if (send) {
3859 sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3860 sge[0].buffer_address_low = ocs_addr32_lo(send->phys);
3861 sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3862 sge[0].buffer_length = len;
3863 }
3864
3865 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3866 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3867 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys);
3868 sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3869 sge[1].buffer_length = receive->size;
3870 sge[1].last = TRUE;
3871 } else {
3872 sge[0].last = TRUE;
3873 }
3874
3875 switch (type) {
3876 case OCS_HW_ELS_REQ:
3877 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3878 *((uint8_t *)(send->virt)), /* req_type */
3879 len, receive->size,
3880 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3881 ocs_log_err(hw->os, "REQ WQE error\n");
3882 rc = OCS_HW_RTN_ERROR;
3883 }
3884 break;
3885 case OCS_HW_ELS_RSP:
3886 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3887 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3888 iparam->els.ox_id,
3889 rnode, local_flags, UINT32_MAX)) {
3890 ocs_log_err(hw->os, "RSP WQE error\n");
3891 rc = OCS_HW_RTN_ERROR;
3892 }
3893 break;
3894 case OCS_HW_ELS_RSP_SID:
3895 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3896 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3897 iparam->els_sid.ox_id,
3898 rnode, local_flags, iparam->els_sid.s_id)) {
3899 ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3900 rc = OCS_HW_RTN_ERROR;
3901 }
3902 break;
3903 case OCS_HW_FC_CT:
3904 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3905 receive->size, iparam->fc_ct.timeout, io->indicator,
3906 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3907 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3908 ocs_log_err(hw->os, "GEN WQE error\n");
3909 rc = OCS_HW_RTN_ERROR;
3910 }
3911 break;
3912 case OCS_HW_FC_CT_RSP:
3913 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3914 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3915 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3916 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3917 ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3918 rc = OCS_HW_RTN_ERROR;
3919 }
3920 break;
3921 case OCS_HW_BLS_ACC:
3922 case OCS_HW_BLS_RJT:
3923 {
3924 sli_bls_payload_t bls;
3925
3926 if (OCS_HW_BLS_ACC == type) {
3927 bls.type = SLI_BLS_ACC;
3928 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3929 } else {
3930 bls.type = SLI_BLS_RJT;
3931 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3932 }
3933
3934 bls.ox_id = iparam->bls.ox_id;
3935 bls.rx_id = iparam->bls.rx_id;
3936
3937 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3938 io->indicator, io->reqtag,
3939 SLI4_CQ_DEFAULT,
3940 rnode, UINT32_MAX)) {
3941 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3942 rc = OCS_HW_RTN_ERROR;
3943 }
3944 break;
3945 }
3946 case OCS_HW_BLS_ACC_SID:
3947 {
3948 sli_bls_payload_t bls;
3949
3950 bls.type = SLI_BLS_ACC;
3951 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3952
3953 bls.ox_id = iparam->bls_sid.ox_id;
3954 bls.rx_id = iparam->bls_sid.rx_id;
3955
3956 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3957 io->indicator, io->reqtag,
3958 SLI4_CQ_DEFAULT,
3959 rnode, iparam->bls_sid.s_id)) {
3960 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3961 rc = OCS_HW_RTN_ERROR;
3962 }
3963 break;
3964 }
3965 case OCS_HW_BCAST:
3966 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3967 iparam->bcast.timeout, io->indicator, io->reqtag,
3968 SLI4_CQ_DEFAULT, rnode,
3969 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3970 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3971 rc = OCS_HW_RTN_ERROR;
3972 }
3973 break;
3974 default:
3975 ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3976 rc = OCS_HW_RTN_ERROR;
3977 }
3978
3979 if (OCS_HW_RTN_SUCCESS == rc) {
3980 if (io->wq == NULL) {
3981 io->wq = ocs_hw_queue_next_wq(hw, io);
3982 ocs_hw_assert(io->wq != NULL);
3983 }
3984 io->xbusy = TRUE;
3985
3986 /*
3987 * Add IO to active io wqe list before submitting, in case the
3988 * wcqe processing preempts this thread.
3989 */
3990 OCS_STAT(io->wq->use_count++);
3991 ocs_hw_add_io_timed_wqe(hw, io);
3992 rc = hw_wq_write(io->wq, &io->wqe);
3993 if (rc >= 0) {
3994 /* non-negative return is success */
3995 rc = 0;
3996 } else {
3997 /* failed to write wqe, remove from active wqe list */
3998 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3999 io->xbusy = FALSE;
4000 ocs_hw_remove_io_timed_wqe(hw, io);
4001 }
4002 }
4003
4004 return rc;
4005 }
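
/*
* Example (hypothetical caller, for illustration only; not compiled): send
* an ELS request with ocs_hw_srrs_send(). The els_req/els_rsp DMA buffers
* are assumed to have been allocated with ocs_dma_alloc() and the request
* payload filled in by the caller; the timeout value is a placeholder.
*/
#if 0
static ocs_hw_rtn_e
example_send_els_req(ocs_hw_t *hw, ocs_remote_node_t *rnode,
		     ocs_dma_t *els_req, uint32_t req_len, ocs_dma_t *els_rsp)
{
	ocs_hw_io_t *io;
	ocs_hw_io_param_t iparam;

	io = ocs_hw_io_alloc(hw);
	if (io == NULL) {
		return OCS_HW_RTN_NO_RESOURCES;
	}

	ocs_memset(&iparam, 0, sizeof(iparam));
	iparam.els.timeout = 30;	/* seconds */

	/* an ELS request needs both a send and a receive buffer */
	return ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, els_req, req_len,
				els_rsp, rnode, &iparam, NULL, NULL);
}
#endif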
4006
4007 /**
4008 * @ingroup io
4009 * @brief Send a read, write, or response IO.
4010 *
4011 * @par Description
4012 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4013 * as a target or initiator. Examples include:
4014 * - Sending read data and good response (target).
4015 * - Sending a response (target with no data or after receiving write data).
4016 * .
4017 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4018 * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4019 *
4020 * @param hw Hardware context.
4021 * @param type Type of IO (target read, target response, and so on).
4022 * @param io Previously-allocated HW IO object.
4023 * @param len Length, in bytes, of data to send.
4024 * @param iparam IO parameters.
4025 * @param rnode Destination of data (that is, a remote node).
4026 * @param cb Function call upon completion of sending data (may be NULL).
4027 * @param arg Argument to pass to IO completion function.
4028 *
4029 * @return Returns 0 on success, or a non-zero value on failure.
4030 *
4031 * @todo
* - Support specifying a relative offset.
4033 * - Use a WQ other than 0.
4034 */
4035 ocs_hw_rtn_e
4036 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4037 uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4038 void *cb, void *arg)
4039 {
4040 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4041 uint32_t rpi;
4042 uint8_t send_wqe = TRUE;
4043
4044 CPUTRACE("");
4045
4046 if (!hw || !io || !rnode || !iparam) {
4047 ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4048 hw, io, iparam, rnode);
4049 return OCS_HW_RTN_ERROR;
4050 }
4051
4052 if (hw->state != OCS_HW_STATE_ACTIVE) {
4053 ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4054 return OCS_HW_RTN_ERROR;
4055 }
4056
4057 rpi = rnode->indicator;
4058
4059 if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4060 rpi = hw->workaround.unregistered_rid;
4061 ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4062 }
4063
4064 /*
4065 * Save state needed during later stages
4066 */
4067 io->rnode = rnode;
4068 io->type = type;
4069 io->done = cb;
4070 io->arg = arg;
4071
4072 /*
4073 * Format the work queue entry used to send the IO
4074 */
4075 switch (type) {
4076 case OCS_HW_IO_INITIATOR_READ:
4077 /*
* If the use_dif_quarantine workaround is in effect and the DIF mode is
* separate, then mark the initiator read IO for quarantine
4080 */
4081 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4082 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4083 io->quarantine = TRUE;
4084 }
4085
4086 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4087 iparam->fcp_ini.rsp);
4088
4089 if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4090 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4091 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4092 iparam->fcp_ini.timeout)) {
4093 ocs_log_err(hw->os, "IREAD WQE error\n");
4094 rc = OCS_HW_RTN_ERROR;
4095 }
4096 break;
4097 case OCS_HW_IO_INITIATOR_WRITE:
4098 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4099 iparam->fcp_ini.rsp);
4100
4101 if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4102 len, iparam->fcp_ini.first_burst,
4103 io->indicator, io->reqtag,
4104 SLI4_CQ_DEFAULT, rpi, rnode,
4105 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4106 iparam->fcp_ini.timeout)) {
4107 ocs_log_err(hw->os, "IWRITE WQE error\n");
4108 rc = OCS_HW_RTN_ERROR;
4109 }
4110 break;
4111 case OCS_HW_IO_INITIATOR_NODATA:
4112 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4113 iparam->fcp_ini.rsp);
4114
4115 if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4116 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4117 rpi, rnode, iparam->fcp_ini.timeout)) {
4118 ocs_log_err(hw->os, "ICMND WQE error\n");
4119 rc = OCS_HW_RTN_ERROR;
4120 }
4121 break;
4122 case OCS_HW_IO_TARGET_WRITE: {
4123 uint16_t flags = iparam->fcp_tgt.flags;
4124 fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4125
4126 /*
4127 * Fill in the XFER_RDY for IF_TYPE 0 devices
4128 */
4129 *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4130 *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4131 *((uint32_t *)xfer->rsvd) = 0;
4132
4133 if (io->xbusy) {
4134 flags |= SLI4_IO_CONTINUATION;
4135 } else {
4136 flags &= ~SLI4_IO_CONTINUATION;
4137 }
4138
4139 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4140
4141 /*
* If the use_dif_quarantine workaround is in effect and this is a
* DIF-enabled IO, then mark the target write IO for quarantine
4144 */
4145 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4146 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4147 io->quarantine = TRUE;
4148 }
4149
4150 /*
4151 * BZ 161832 Workaround:
* Check for the use_dif_sec_xri workaround. Note that even though the first
* data phase doesn't really need a secondary XRI, we allocate one anyway, as
* this avoids the potential for deadlock where all XRIs are allocated as
* primaries to IOs that are on hw->sec_hio_wait_list. If this secondary XRI
* is not for the first data phase, it is marked for quarantine.
4157 */
4158 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4159 /*
4160 * If we have allocated a chained SGL for skyhawk, then
4161 * we can re-use this for the sec_hio.
4162 */
4163 if (io->ovfl_io != NULL) {
4164 io->sec_hio = io->ovfl_io;
4165 io->sec_hio->quarantine = TRUE;
4166 } else {
4167 io->sec_hio = ocs_hw_io_alloc(hw);
4168 }
4169 if (io->sec_hio == NULL) {
4170 /* Failed to allocate, so save full request context and put
4171 * this IO on the wait list
4172 */
4173 io->sec_iparam = *iparam;
4174 io->sec_len = len;
4175 ocs_lock(&hw->io_lock);
4176 ocs_list_remove(&hw->io_inuse, io);
4177 ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4178 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4179 hw->sec_hio_wait_count++;
4180 ocs_unlock(&hw->io_lock);
4181 send_wqe = FALSE;
4182 /* Done */
4183 break;
4184 }
4185 /* We quarantine the secondary IO if this is the second or subsequent data phase */
4186 if (io->xbusy) {
4187 io->sec_hio->quarantine = TRUE;
4188 }
4189 }
4190
4191 /*
4192 * If not the first data phase, and io->sec_hio has been allocated, then issue
4193 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4194 */
4195 if (io->xbusy && (io->sec_hio != NULL)) {
4196 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4197 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4198 io->reqtag, SLI4_CQ_DEFAULT,
4199 iparam->fcp_tgt.ox_id, rpi, rnode,
4200 flags,
4201 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4202 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4203 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4204 rc = OCS_HW_RTN_ERROR;
4205 }
4206 } else {
4207 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4208 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4209 SLI4_CQ_DEFAULT,
4210 iparam->fcp_tgt.ox_id, rpi, rnode,
4211 flags,
4212 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4213 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4214 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4215 rc = OCS_HW_RTN_ERROR;
4216 }
4217 }
4218 break;
4219 }
4220 case OCS_HW_IO_TARGET_READ: {
4221 uint16_t flags = iparam->fcp_tgt.flags;
4222
4223 if (io->xbusy) {
4224 flags |= SLI4_IO_CONTINUATION;
4225 } else {
4226 flags &= ~SLI4_IO_CONTINUATION;
4227 }
4228
4229 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4230 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4231 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4232 SLI4_CQ_DEFAULT,
4233 iparam->fcp_tgt.ox_id, rpi, rnode,
4234 flags,
4235 iparam->fcp_tgt.dif_oper,
4236 iparam->fcp_tgt.blk_size,
4237 iparam->fcp_tgt.cs_ctl,
4238 iparam->fcp_tgt.app_id)) {
4239 ocs_log_err(hw->os, "TSEND WQE error\n");
4240 rc = OCS_HW_RTN_ERROR;
4241 } else if (hw->workaround.retain_tsend_io_length) {
4242 io->length = len;
4243 }
4244 break;
4245 }
4246 case OCS_HW_IO_TARGET_RSP: {
4247 uint16_t flags = iparam->fcp_tgt.flags;
4248
4249 if (io->xbusy) {
4250 flags |= SLI4_IO_CONTINUATION;
4251 } else {
4252 flags &= ~SLI4_IO_CONTINUATION;
4253 }
4254
4255 /* post a new auto xfer ready buffer */
4256 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4257 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4258 flags |= SLI4_IO_DNRX;
4259 }
4260 }
4261
4262 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4263 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4264 &io->def_sgl,
4265 len,
4266 io->indicator, io->reqtag,
4267 SLI4_CQ_DEFAULT,
4268 iparam->fcp_tgt.ox_id,
4269 rpi, rnode,
4270 flags, iparam->fcp_tgt.cs_ctl,
4271 io->is_port_owned,
4272 iparam->fcp_tgt.app_id)) {
4273 ocs_log_err(hw->os, "TRSP WQE error\n");
4274 rc = OCS_HW_RTN_ERROR;
4275 }
4276
4277 break;
4278 }
4279 default:
4280 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4281 rc = OCS_HW_RTN_ERROR;
4282 }
4283
4284 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4285 if (io->wq == NULL) {
4286 io->wq = ocs_hw_queue_next_wq(hw, io);
4287 ocs_hw_assert(io->wq != NULL);
4288 }
4289
4290 io->xbusy = TRUE;
4291
4292 /*
4293 * Add IO to active io wqe list before submitting, in case the
4294 * wcqe processing preempts this thread.
4295 */
4296 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4297 OCS_STAT(io->wq->use_count++);
4298 ocs_hw_add_io_timed_wqe(hw, io);
4299 rc = hw_wq_write(io->wq, &io->wqe);
4300 if (rc >= 0) {
4301 /* non-negative return is success */
4302 rc = 0;
4303 } else {
4304 /* failed to write wqe, remove from active wqe list */
4305 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4306 io->xbusy = FALSE;
4307 ocs_hw_remove_io_timed_wqe(hw, io);
4308 }
4309 }
4310
4311 return rc;
4312 }
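
/*
* Example (hypothetical caller, for illustration only; not compiled): send
* read data to an initiator as a target. Assumes data_phys/data_len
* describe a caller-owned DMA buffer and ox_id was taken from the received
* command.
*/
#if 0
static ocs_hw_rtn_e
example_target_read(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_remote_node_t *rnode,
		    uintptr_t data_phys, uint32_t data_len, uint16_t ox_id)
{
	ocs_hw_io_param_t iparam;

	/* the SGL must be loaded before calling ocs_hw_io_send() */
	if (ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ) ||
	    ocs_hw_io_add_sge(hw, io, data_phys, data_len)) {
		return OCS_HW_RTN_ERROR;
	}

	ocs_memset(&iparam, 0, sizeof(iparam));
	iparam.fcp_tgt.ox_id = ox_id;
	iparam.fcp_tgt.dif_oper = OCS_HW_DIF_OPER_DISABLED;

	return ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, data_len,
			      &iparam, rnode, NULL, NULL);
}
#endif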
4313
4314 /**
4315 * @brief Send a raw frame
4316 *
4317 * @par Description
* A frame consisting of a header and payload is sent using the SEND_FRAME WQE.
4319 *
4320 * @param hw Pointer to HW object.
4321 * @param hdr Pointer to a little endian formatted FC header.
4322 * @param sof Value to use as the frame SOF.
4323 * @param eof Value to use as the frame EOF.
4324 * @param payload Pointer to payload DMA buffer.
4325 * @param ctx Pointer to caller provided send frame context.
4326 * @param callback Callback function.
4327 * @param arg Callback function argument.
4328 *
4329 * @return Returns 0 on success, or a negative error code value on failure.
4330 */
4331 ocs_hw_rtn_e
4332 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4333 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4334 {
4335 int32_t rc;
4336 ocs_hw_wqe_t *wqe;
4337 uint32_t xri;
4338 hw_wq_t *wq;
4339
4340 wqe = &ctx->wqe;
4341
4342 /* populate the callback object */
4343 ctx->hw = hw;
4344
4345 /* Fetch and populate request tag */
4346 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4347 if (ctx->wqcb == NULL) {
4348 ocs_log_err(hw->os, "can't allocate request tag\n");
4349 return OCS_HW_RTN_NO_RESOURCES;
4350 }
4351
4352 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4353 wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4354 if (wq == NULL) {
4355 wq = hw->hw_wq[0];
4356 }
4357
/* Set XRI and RX_ID in the header based on which WQ and send_frame_io we are using */
4359 xri = wq->send_frame_io->indicator;
4360
4361 /* Build the send frame WQE */
4362 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4363 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4364 if (rc) {
4365 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4366 return OCS_HW_RTN_ERROR;
4367 }
4368
4369 /* Write to WQ */
4370 rc = hw_wq_write(wq, wqe);
4371 if (rc) {
4372 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4373 return OCS_HW_RTN_ERROR;
4374 }
4375
4376 OCS_STAT(wq->use_count++);
4377
4378 return OCS_HW_RTN_SUCCESS;
4379 }
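
/*
* Example (hypothetical, for illustration only; not compiled): send a raw
* frame with a completion callback. The header, payload, SOF/EOF values,
* and context are assumed to be prepared by the caller; the callback
* signature matches the prototype accepted by ocs_hw_send_frame() above.
*/
#if 0
static void
example_send_frame_done(void *arg, uint8_t *cqe, int32_t status)
{
	/* frame completed (status == 0) or failed; release resources here */
}

static ocs_hw_rtn_e
example_send_raw_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof,
		       uint8_t eof, ocs_dma_t *payload,
		       ocs_hw_send_frame_context_t *ctx)
{
	return ocs_hw_send_frame(hw, hdr, sof, eof, payload, ctx,
				 example_send_frame_done, NULL);
}
#endif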
4380
4381 ocs_hw_rtn_e
4382 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4383 {
4384 if (sli_get_sgl_preregister(&hw->sli)) {
4385 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4386 return OCS_HW_RTN_ERROR;
4387 }
4388 io->ovfl_sgl = sgl;
4389 io->ovfl_sgl_count = sgl_count;
4390 io->ovfl_io = NULL;
4391
4392 return OCS_HW_RTN_SUCCESS;
4393 }
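
/*
* Example (hypothetical, for illustration only; not compiled): register a
* caller-owned DMA buffer as a temporary overflow SGL before adding many
* SGEs; this is only valid when SGLs are not pre-registered. The 64-entry
* count is a placeholder.
*/
#if 0
static void
example_register_ovfl_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *ovfl_sgl)
{
	if (ocs_dma_alloc(hw->os, ovfl_sgl, 64 * sizeof(sli4_sge_t),
			  OCS_MIN_DMA_ALIGNMENT) == 0) {
		ocs_hw_io_register_sgl(hw, io, ovfl_sgl, 64);
	}
}
#endif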
4394
4395 static void
4396 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4397 {
4398 /* Restore the default */
4399 io->sgl = &io->def_sgl;
4400 io->sgl_count = io->def_sgl_count;
4401
4402 /*
4403 * For skyhawk, we need to free the IO allocated for the chained
4404 * SGL. For all devices, clear the overflow fields on the IO.
4405 *
4406 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4407 * the chained SGLs. If so, then we clear the ovfl_io field
4408 * when the sec_hio is freed.
4409 */
4410 if (io->ovfl_io != NULL) {
4411 ocs_hw_io_free(hw, io->ovfl_io);
4412 io->ovfl_io = NULL;
4413 }
4414
4415 /* Clear the overflow SGL */
4416 io->ovfl_sgl = NULL;
4417 io->ovfl_sgl_count = 0;
4418 io->ovfl_lsp = NULL;
4419 }
4420
4421 /**
4422 * @ingroup io
4423 * @brief Initialize the scatter gather list entries of an IO.
4424 *
4425 * @param hw Hardware context.
4426 * @param io Previously-allocated HW IO object.
4427 * @param type Type of IO (target read, target response, and so on).
4428 *
4429 * @return Returns 0 on success, or a non-zero value on failure.
4430 */
4431 ocs_hw_rtn_e
4432 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4433 {
4434 sli4_sge_t *data = NULL;
4435 uint32_t i = 0;
4436 uint32_t skips = 0;
4437
4438 if (!hw || !io) {
4439 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4440 hw, io);
4441 return OCS_HW_RTN_ERROR;
4442 }
4443
4444 /* Clear / reset the scatter-gather list */
4445 io->sgl = &io->def_sgl;
4446 io->sgl_count = io->def_sgl_count;
4447 io->first_data_sge = 0;
4448
4449 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4450 io->n_sge = 0;
4451 io->sge_offset = 0;
4452
4453 io->type = type;
4454
4455 data = io->sgl->virt;
4456
4457 /*
4458 * Some IO types have underlying hardware requirements on the order
4459 * of SGEs. Process all special entries here.
4460 */
4461 switch (type) {
4462 case OCS_HW_IO_INITIATOR_READ:
4463 case OCS_HW_IO_INITIATOR_WRITE:
4464 case OCS_HW_IO_INITIATOR_NODATA:
4465 /*
4466 * No skips, 2 special for initiator I/Os
4467 * The addresses and length are written later
4468 */
4469 /* setup command pointer */
4470 data->sge_type = SLI4_SGE_TYPE_DATA;
4471 data++;
4472
4473 /* setup response pointer */
4474 data->sge_type = SLI4_SGE_TYPE_DATA;
4475
4476 if (OCS_HW_IO_INITIATOR_NODATA == type) {
4477 data->last = TRUE;
4478 }
4479 data++;
4480
4481 io->n_sge = 2;
4482 break;
4483 case OCS_HW_IO_TARGET_WRITE:
4484 #define OCS_TARGET_WRITE_SKIPS 2
4485 skips = OCS_TARGET_WRITE_SKIPS;
4486
4487 /* populate host resident XFER_RDY buffer */
4488 data->sge_type = SLI4_SGE_TYPE_DATA;
4489 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4490 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys);
4491 data->buffer_length = io->xfer_rdy.size;
4492 data++;
4493
4494 skips--;
4495
4496 io->n_sge = 1;
4497 break;
4498 case OCS_HW_IO_TARGET_READ:
4499 /*
4500 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4501 */
4502 #define OCS_TARGET_READ_SKIPS 2
4503 skips = OCS_TARGET_READ_SKIPS;
4504 break;
4505 case OCS_HW_IO_TARGET_RSP:
4506 /*
4507 * No skips, etc. for FCP_TRSP64
4508 */
4509 break;
4510 default:
4511 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4512 return OCS_HW_RTN_ERROR;
4513 }
4514
4515 /*
4516 * Write skip entries
4517 */
4518 for (i = 0; i < skips; i++) {
4519 data->sge_type = SLI4_SGE_TYPE_SKIP;
4520 data++;
4521 }
4522
4523 io->n_sge += skips;
4524
4525 /*
4526 * Set last
4527 */
4528 data->last = TRUE;
4529
4530 return OCS_HW_RTN_SUCCESS;
4531 }
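
/*
* For reference, the special SGE layout written by ocs_hw_io_init_sges()
* for each IO type (data SGEs added with ocs_hw_io_add_sge() follow these
* entries):
*
*	INITIATOR_READ/WRITE/NODATA:  [0] FCP_CMND  [1] FCP_RSP
*	TARGET_WRITE:                 [0] XFER_RDY  [1] SKIP
*	TARGET_READ:                  [0] SKIP      [1] SKIP
*	TARGET_RSP:                   (no special entries)
*/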
4532
4533 /**
4534 * @ingroup io
4535 * @brief Add a T10 PI seed scatter gather list entry.
4536 *
4537 * @param hw Hardware context.
4538 * @param io Previously-allocated HW IO object.
4539 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4540 *
4541 * @return Returns 0 on success, or a non-zero value on failure.
4542 */
4543 ocs_hw_rtn_e
4544 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4545 {
4546 sli4_sge_t *data = NULL;
4547 sli4_diseed_sge_t *dif_seed;
4548
4549 /* If no dif_info, or dif_oper is disabled, then just return success */
4550 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4551 return OCS_HW_RTN_SUCCESS;
4552 }
4553
4554 if (!hw || !io) {
4555 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4556 hw, io, dif_info);
4557 return OCS_HW_RTN_ERROR;
4558 }
4559
4560 data = io->sgl->virt;
4561 data += io->n_sge;
4562
4563 /* If we are doing T10 DIF add the DIF Seed SGE */
4564 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4565 dif_seed = (sli4_diseed_sge_t *)data;
4566 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4567 dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4568 dif_seed->app_tag_repl = dif_info->app_tag_repl;
4569 dif_seed->repl_app_tag = dif_info->repl_app_tag;
4570 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4571 dif_seed->atrt = dif_info->disable_app_ref_ffff;
4572 dif_seed->at = dif_info->disable_app_ffff;
4573 }
4574 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4575 /* Workaround for SKH (BZ157233) */
4576 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4577 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4578 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4579 }
4580
4581 dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4582 dif_seed->dif_blk_size = dif_info->blk_size;
4583 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4584 dif_seed->check_app_tag = dif_info->check_app_tag;
4585 dif_seed->check_ref_tag = dif_info->check_ref_tag;
4586 dif_seed->check_crc = dif_info->check_guard;
4587 dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4588
4589 switch(dif_info->dif_oper) {
4590 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4591 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4592 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4593 break;
4594 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4595 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4596 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4597 break;
4598 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4599 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4600 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4601 break;
4602 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4603 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4604 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4605 break;
4606 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4607 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4608 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4609 break;
4610 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4611 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4612 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4613 break;
4614 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4615 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4616 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4617 break;
4618 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4619 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4620 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4621 break;
4622 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4623 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4624 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4625 break;
4626 default:
4627 ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4628 dif_info->dif_oper);
4629 return OCS_HW_RTN_ERROR;
4630 }
4631
4632 /*
4633 * Set last, clear previous last
4634 */
4635 data->last = TRUE;
4636 if (io->n_sge) {
4637 data[-1].last = FALSE;
4638 }
4639
4640 io->n_sge++;
4641
4642 return OCS_HW_RTN_SUCCESS;
4643 }
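
/*
* Example (hypothetical, for illustration only; not compiled): seed a
* DIF-protected transfer. Field values are placeholders; blk_size uses the
* SLI encoding where 0 selects 512-byte blocks.
*/
#if 0
static ocs_hw_rtn_e
example_add_dif_seed(ocs_hw_t *hw, ocs_hw_io_t *io, uint32_t start_lba,
		     uintptr_t data_phys, uint32_t data_len)
{
	ocs_hw_dif_info_t dif;

	ocs_memset(&dif, 0, sizeof(dif));
	dif.dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF;
	dif.blk_size = 0;		/* SLI encoding: 0 = 512-byte blocks */
	dif.check_ref_tag = TRUE;
	dif.check_guard = TRUE;
	dif.ref_tag_cmp = start_lba;	/* initial reference tag */

	/* the DISEED SGE must precede the data SGEs it protects */
	if (ocs_hw_io_add_seed_sge(hw, io, &dif) != OCS_HW_RTN_SUCCESS) {
		return OCS_HW_RTN_ERROR;
	}
	return ocs_hw_io_add_sge(hw, io, data_phys, data_len);
}
#endif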
4644
4645 static ocs_hw_rtn_e
4646 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4647 {
4648 sli4_lsp_sge_t *lsp;
4649
4650 /* fail if we're already pointing to the overflow SGL */
4651 if (io->sgl == io->ovfl_sgl) {
4652 return OCS_HW_RTN_ERROR;
4653 }
4654
4655 /*
4656 * For skyhawk, we can use another SGL to extend the SGL list. The
4657 * Chained entry must not be in the first 4 entries.
4658 *
4659 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4660 */
4661 if (sli_get_sgl_preregister(&hw->sli) &&
4662 io->def_sgl_count > 4 &&
4663 io->ovfl_io == NULL &&
4664 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4665 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4666 io->ovfl_io = ocs_hw_io_alloc(hw);
4667 if (io->ovfl_io != NULL) {
4668 /*
4669 * Note: We can't call ocs_hw_io_register_sgl() here
4670 * because it checks that SGLs are not pre-registered
* and for skyhawk, preregistered SGLs are required.
4672 */
4673 io->ovfl_sgl = &io->ovfl_io->def_sgl;
4674 io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4675 }
4676 }
4677
4678 /* fail if we don't have an overflow SGL registered */
4679 if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4680 return OCS_HW_RTN_ERROR;
4681 }
4682
4683 /*
* Overflow: we need to put a link SGE in the last location of the current
* SGL, after copying the last SGE to the overflow SGL
4686 */
4687
4688 ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4689
4690 lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4691 ocs_memset(lsp, 0, sizeof(*lsp));
4692
4693 if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4694 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4695 sli_skh_chain_sge_build(&hw->sli,
4696 (sli4_sge_t*)lsp,
4697 io->ovfl_io->indicator,
4698 0, /* frag_num */
4699 0); /* offset */
4700 } else {
4701 lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4702 lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
4703 lsp->sge_type = SLI4_SGE_TYPE_LSP;
4704 lsp->last = 0;
4705 io->ovfl_lsp = lsp;
4706 io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4707 }
4708
4709 /* Update the current SGL pointer, and n_sgl */
4710 io->sgl = io->ovfl_sgl;
4711 io->sgl_count = io->ovfl_sgl_count;
4712 io->n_sge = 1;
4713
4714 return OCS_HW_RTN_SUCCESS;
4715 }
4716
4717 /**
4718 * @ingroup io
4719 * @brief Add a scatter gather list entry to an IO.
4720 *
4721 * @param hw Hardware context.
4722 * @param io Previously-allocated HW IO object.
4723 * @param addr Physical address.
4724 * @param length Length of memory pointed to by @c addr.
4725 *
4726 * @return Returns 0 on success, or a non-zero value on failure.
4727 */
4728 ocs_hw_rtn_e
4729 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4730 {
4731 sli4_sge_t *data = NULL;
4732
4733 if (!hw || !io || !addr || !length) {
4734 ocs_log_err(hw ? hw->os : NULL,
4735 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4736 hw, io, addr, length);
4737 return OCS_HW_RTN_ERROR;
4738 }
4739
4740 if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4741 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4742 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4743 return OCS_HW_RTN_ERROR;
4744 }
4745 }
4746
4747 if (length > sli_get_max_sge(&hw->sli)) {
4748 ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4749 length, sli_get_max_sge(&hw->sli));
4750 return OCS_HW_RTN_ERROR;
4751 }
4752
4753 data = io->sgl->virt;
4754 data += io->n_sge;
4755
4756 data->sge_type = SLI4_SGE_TYPE_DATA;
4757 data->buffer_address_high = ocs_addr32_hi(addr);
4758 data->buffer_address_low = ocs_addr32_lo(addr);
4759 data->buffer_length = length;
4760 data->data_offset = io->sge_offset;
4761 /*
4762 * Always assume this is the last entry and mark as such.
4763 * If this is not the first entry unset the "last SGE"
4764 * indication for the previous entry
4765 */
4766 data->last = TRUE;
4767 if (io->n_sge) {
4768 data[-1].last = FALSE;
4769 }
4770
/* Set first_data_sge if not previously set */
4772 if (io->first_data_sge == 0) {
4773 io->first_data_sge = io->n_sge;
4774 }
4775
4776 io->sge_offset += length;
4777 io->n_sge++;
4778
4779 /* Update the linked segment length (only executed after overflow has begun) */
4780 if (io->ovfl_lsp != NULL) {
4781 io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4782 }
4783
4784 return OCS_HW_RTN_SUCCESS;
4785 }
4786
4787 /**
4788 * @ingroup io
4789 * @brief Add a T10 DIF scatter gather list entry to an IO.
4790 *
4791 * @param hw Hardware context.
4792 * @param io Previously-allocated HW IO object.
4793 * @param addr DIF physical address.
4794 *
4795 * @return Returns 0 on success, or a non-zero value on failure.
4796 */
4797 ocs_hw_rtn_e
4798 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4799 {
4800 sli4_dif_sge_t *data = NULL;
4801
4802 if (!hw || !io || !addr) {
4803 ocs_log_err(hw ? hw->os : NULL,
4804 "bad parameter hw=%p io=%p addr=%lx\n",
4805 hw, io, addr);
4806 return OCS_HW_RTN_ERROR;
4807 }
4808
4809 if ((io->n_sge + 1) > hw->config.n_sgl) {
if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4811 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4812 return OCS_HW_RTN_ERROR;
4813 }
4814 }
4815
4816 data = io->sgl->virt;
4817 data += io->n_sge;
4818
4819 data->sge_type = SLI4_SGE_TYPE_DIF;
4820 /* Workaround for SKH (BZ157233) */
4821 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4822 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4823 data->sge_type = SLI4_SGE_TYPE_SKIP;
4824 }
4825
4826 data->buffer_address_high = ocs_addr32_hi(addr);
4827 data->buffer_address_low = ocs_addr32_lo(addr);
4828
4829 /*
4830 * Always assume this is the last entry and mark as such.
4831 * If this is not the first entry unset the "last SGE"
4832 * indication for the previous entry
4833 */
4834 data->last = TRUE;
4835 if (io->n_sge) {
4836 data[-1].last = FALSE;
4837 }
4838
4839 io->n_sge++;
4840
4841 return OCS_HW_RTN_SUCCESS;
4842 }
4843
4844 /**
4845 * @ingroup io
4846 * @brief Abort a previously-started IO.
4847 *
4848 * @param hw Hardware context.
4849 * @param io_to_abort The IO to abort.
4850 * @param send_abts Boolean to have the hardware automatically
4851 * generate an ABTS.
4852 * @param cb Function call upon completion of the abort (may be NULL).
4853 * @param arg Argument to pass to abort completion function.
4854 *
4855 * @return Returns 0 on success, or a non-zero value on failure.
4856 */
4857 ocs_hw_rtn_e
4858 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4859 {
4860 sli4_abort_type_e atype = SLI_ABORT_MAX;
4861 uint32_t id = 0, mask = 0;
4862 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4863 hw_wq_callback_t *wqcb;
4864
4865 if (!hw || !io_to_abort) {
4866 ocs_log_err(hw ? hw->os : NULL,
4867 "bad parameter hw=%p io=%p\n",
4868 hw, io_to_abort);
4869 return OCS_HW_RTN_ERROR;
4870 }
4871
4872 if (hw->state != OCS_HW_STATE_ACTIVE) {
4873 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4874 hw->state);
4875 return OCS_HW_RTN_ERROR;
4876 }
4877
4878 /* take a reference on IO being aborted */
4879 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4880 /* command no longer active */
4881 ocs_log_test(hw ? hw->os : NULL,
4882 "io not active xri=0x%x tag=0x%x\n",
4883 io_to_abort->indicator, io_to_abort->reqtag);
4884 return OCS_HW_RTN_IO_NOT_ACTIVE;
4885 }
4886
4887 /* non-port owned XRI checks */
4888 /* Must have a valid WQ reference */
4889 if (io_to_abort->wq == NULL) {
4890 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4891 io_to_abort->indicator);
4892 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4893 return OCS_HW_RTN_IO_NOT_ACTIVE;
4894 }
4895
4896 /* Validation checks complete; now check to see if already being aborted */
4897 ocs_lock(&hw->io_abort_lock);
4898 if (io_to_abort->abort_in_progress) {
4899 ocs_unlock(&hw->io_abort_lock);
4900 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4901 ocs_log_debug(hw ? hw->os : NULL,
4902 "io already being aborted xri=0x%x tag=0x%x\n",
4903 io_to_abort->indicator, io_to_abort->reqtag);
4904 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4905 }
4906
4907 /*
4908 * This IO is not already being aborted. Set flag so we won't try to
4909 * abort it again. After all, we only have one abort_done callback.
4910 */
4911 io_to_abort->abort_in_progress = 1;
4912 ocs_unlock(&hw->io_abort_lock);
4913
/*
* If we got here, the possibilities are:
* - host owned xri:
*   - io_to_abort->wq_index != UINT32_MAX
*     - submit ABORT_WQE to same WQ
* - port owned xri:
*   - rxri: io_to_abort->wq_index == UINT32_MAX
*     - submit ABORT_WQE to any WQ
*   - non-rxri:
*     - io_to_abort->index != UINT32_MAX
*       - submit ABORT_WQE to same WQ
*     - io_to_abort->index == UINT32_MAX
*       - submit ABORT_WQE to any WQ
*/
4928 io_to_abort->abort_done = cb;
4929 io_to_abort->abort_arg = arg;
4930
4931 atype = SLI_ABORT_XRI;
4932 id = io_to_abort->indicator;
4933
4934 /* Allocate a request tag for the abort portion of this IO */
4935 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
if (wqcb == NULL) {
ocs_log_err(hw->os, "can't allocate request tag\n");
/* undo the in-progress flag and the IO reference taken above */
ocs_lock(&hw->io_abort_lock);
io_to_abort->abort_in_progress = 0;
ocs_unlock(&hw->io_abort_lock);
ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
return OCS_HW_RTN_NO_RESOURCES;
}
4940 io_to_abort->abort_reqtag = wqcb->instance_index;
4941
4942 /*
4943 * If the wqe is on the pending list, then set this wqe to be
4944 * aborted when the IO's wqe is removed from the list.
4945 */
4946 if (io_to_abort->wq != NULL) {
4947 sli_queue_lock(io_to_abort->wq->queue);
4948 if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4949 io_to_abort->wqe.abort_wqe_submit_needed = 1;
4950 io_to_abort->wqe.send_abts = send_abts;
4951 io_to_abort->wqe.id = id;
4952 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4953 sli_queue_unlock(io_to_abort->wq->queue);
return OCS_HW_RTN_SUCCESS;
4955 }
4956 sli_queue_unlock(io_to_abort->wq->queue);
4957 }
4958
4959 if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4960 io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4961 ocs_log_err(hw->os, "ABORT WQE error\n");
4962 io_to_abort->abort_reqtag = UINT32_MAX;
4963 ocs_hw_reqtag_free(hw, wqcb);
4964 rc = OCS_HW_RTN_ERROR;
4965 }
4966
4967 if (OCS_HW_RTN_SUCCESS == rc) {
4968 if (io_to_abort->wq == NULL) {
4969 io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4970 ocs_hw_assert(io_to_abort->wq != NULL);
4971 }
4972 /* ABORT_WQE does not actually utilize an XRI on the Port,
4973 * therefore, keep xbusy as-is to track the exchange's state,
4974 * not the ABORT_WQE's state
4975 */
4976 rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
if (rc >= 0) {
4978 /* non-negative return is success */
4979 rc = 0;
4980 /* can't abort an abort so skip adding to timed wqe list */
4981 }
4982 }
4983
4984 if (OCS_HW_RTN_SUCCESS != rc) {
4985 ocs_lock(&hw->io_abort_lock);
4986 io_to_abort->abort_in_progress = 0;
4987 ocs_unlock(&hw->io_abort_lock);
4988 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4989 }
4990 return rc;
4991 }
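
/*
* Example (hypothetical, for illustration only; not compiled): abort an
* outstanding IO and have the hardware originate the ABTS; the completion
* callback may be NULL, as here.
*/
#if 0
static void
example_abort_io(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	switch (ocs_hw_io_abort(hw, io, TRUE, NULL, NULL)) {
	case OCS_HW_RTN_SUCCESS:
	case OCS_HW_RTN_IO_ABORT_IN_PROGRESS:
		break;		/* abort issued, or one is already underway */
	case OCS_HW_RTN_IO_NOT_ACTIVE:
		break;		/* too late; the IO already completed */
	default:
		break;		/* hard failure; no abort was issued */
	}
}
#endif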
4992
4993 /**
4994 * @ingroup io
4995 * @brief Return the OX_ID/RX_ID of the IO.
4996 *
4997 * @param hw Hardware context.
4998 * @param io HW IO object.
4999 *
5000 * @return Returns X_ID on success, or -1 on failure.
5001 */
5002 int32_t
5003 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5004 {
5005 if (!hw || !io) {
5006 ocs_log_err(hw ? hw->os : NULL,
5007 "bad parameter hw=%p io=%p\n", hw, io);
5008 return -1;
5009 }
5010
5011 return io->indicator;
5012 }
5013
5014 typedef struct ocs_hw_fw_write_cb_arg {
5015 ocs_hw_fw_cb_t cb;
5016 void *arg;
5017 } ocs_hw_fw_write_cb_arg_t;
5018
5019 typedef struct ocs_hw_sfp_cb_arg {
5020 ocs_hw_sfp_cb_t cb;
5021 void *arg;
5022 ocs_dma_t payload;
5023 } ocs_hw_sfp_cb_arg_t;
5024
5025 typedef struct ocs_hw_temp_cb_arg {
5026 ocs_hw_temp_cb_t cb;
5027 void *arg;
5028 } ocs_hw_temp_cb_arg_t;
5029
5030 typedef struct ocs_hw_link_stat_cb_arg {
5031 ocs_hw_link_stat_cb_t cb;
5032 void *arg;
5033 } ocs_hw_link_stat_cb_arg_t;
5034
5035 typedef struct ocs_hw_host_stat_cb_arg {
5036 ocs_hw_host_stat_cb_t cb;
5037 void *arg;
5038 } ocs_hw_host_stat_cb_arg_t;
5039
5040 typedef struct ocs_hw_dump_get_cb_arg {
5041 ocs_hw_dump_get_cb_t cb;
5042 void *arg;
5043 void *mbox_cmd;
5044 } ocs_hw_dump_get_cb_arg_t;
5045
5046 typedef struct ocs_hw_dump_clear_cb_arg {
5047 ocs_hw_dump_clear_cb_t cb;
5048 void *arg;
5049 void *mbox_cmd;
5050 } ocs_hw_dump_clear_cb_arg_t;
5051
5052 /**
5053 * @brief Write a portion of a firmware image to the device.
5054 *
5055 * @par Description
5056 * Calls the correct firmware write function based on the device type.
5057 *
5058 * @param hw Hardware context.
5059 * @param dma DMA structure containing the firmware image chunk.
5060 * @param size Size of the firmware image chunk.
5061 * @param offset Offset, in bytes, from the beginning of the firmware image.
5062 * @param last True if this is the last chunk of the image.
5063 * Causes the image to be committed to flash.
5064 * @param cb Pointer to a callback function that is called when the command completes.
5065 * The callback function prototype is
* <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5067 * @param arg Pointer to be passed to the callback function.
5068 *
5069 * @return Returns 0 on success, or a non-zero value on failure.
5070 */
5071 ocs_hw_rtn_e
5072 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5073 {
5074 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5075 return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5076 } else {
/* firmware write is not supported for BE3/Skyhawk */
5078 return -1;
5079 }
5080 }
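
/*
* Example (hypothetical, for illustration only; not compiled): download a
* firmware image in chunks staged through a DMA buffer, committing on the
* final chunk. The image/image_size/wait_ctx names are placeholders, and
* the callback parameters mirror the invocation in ocs_hw_cb_fw_write().
*/
#if 0
static void
example_fw_write_done(int32_t status, uint32_t bytes_written,
		      uint32_t change_status, void *arg)
{
	/* record the status and wake the thread running the loop below */
}

static void
example_fw_download(ocs_hw_t *hw, ocs_dma_t *dma, void *image,
		    uint32_t image_size, void *wait_ctx)
{
	uint32_t offset;
	uint32_t chunk_len;
	int last;

	for (offset = 0; offset < image_size; offset += chunk_len) {
		chunk_len = image_size - offset;
		if (chunk_len > dma->size) {
			chunk_len = dma->size;
		}
		ocs_memcpy(dma->virt, (uint8_t *)image + offset, chunk_len);
		last = ((offset + chunk_len) == image_size);

		if (ocs_hw_firmware_write(hw, dma, chunk_len, offset, last,
					  example_fw_write_done, wait_ctx)) {
			break;
		}
		/* wait for example_fw_write_done() before the next chunk */
	}
}
#endif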
5081
5082 /**
5083 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5084 *
5085 * @par Description
5086 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5087 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5088 * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5089 * and to signal the caller that the write has completed.
5090 *
5091 * @param hw Hardware context.
5092 * @param dma DMA structure containing the firmware image chunk.
5093 * @param size Size of the firmware image chunk.
5094 * @param offset Offset, in bytes, from the beginning of the firmware image.
5095 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5096 * @param cb Pointer to a callback function that is called when the command completes.
5097 * The callback function prototype is
* <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5099 * @param arg Pointer to be passed to the callback function.
5100 *
5101 * @return Returns 0 on success, or a non-zero value on failure.
5102 */
5103 ocs_hw_rtn_e
5104 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5105 {
5106 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5107 uint8_t *mbxdata;
5108 ocs_hw_fw_write_cb_arg_t *cb_arg;
int noc = 0; /* No Commit bit - set to 1 for testing */
5110
5111 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5112 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5113 return OCS_HW_RTN_ERROR;
5114 }
5115
5116 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5117 if (mbxdata == NULL) {
5118 ocs_log_err(hw->os, "failed to malloc mbox\n");
5119 return OCS_HW_RTN_NO_MEMORY;
5120 }
5121
5122 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5123 if (cb_arg == NULL) {
5124 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5125 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5126 return OCS_HW_RTN_NO_MEMORY;
5127 }
5128
5129 cb_arg->cb = cb;
5130 cb_arg->arg = arg;
5131
5132 if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5133 size, offset, "/prg/", dma)) {
5134 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5135 }
5136
5137 if (rc != OCS_HW_RTN_SUCCESS) {
5138 ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5139 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5140 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5141 }
5142
5143 return rc;
5145 }
5146
5147 /**
5148 * @brief Called when the WRITE OBJECT command completes.
5149 *
5150 * @par Description
5151 * Get the number of bytes actually written out of the response, free the mailbox
* that was malloc'd by ocs_hw_firmware_write_lancer(),
5153 * then call the callback and pass the status and bytes written.
5154 *
5155 * @param hw Hardware context.
5156 * @param status Status field from the mbox completion.
5157 * @param mqe Mailbox response structure.
5158 * @param arg Pointer to a callback function that signals the caller that the command is done.
* The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5160 *
5161 * @return Returns 0.
5162 */
5163 static int32_t
5164 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5165 {
5166
5167 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5168 sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5169 ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5170 uint32_t bytes_written;
5171 uint16_t mbox_status;
5172 uint32_t change_status;
5173
5174 bytes_written = wr_obj_rsp->actual_write_length;
5175 mbox_status = mbox_rsp->hdr.status;
5176 change_status = wr_obj_rsp->change_status;
5177
5178 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5179
5180 if (cb_arg) {
5181 if (cb_arg->cb) {
5182 if ((status == 0) && mbox_status) {
5183 status = mbox_status;
5184 }
5185 cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5186 }
5187
5188 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5189 }
5190
5191 return 0;
5193 }
5194
5195 /**
5196 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5197 *
5198 * @par Description
5199 * Get the number of bytes read out of the response, free the mailbox that was malloc'd
* by ocs_hw_get_sfp(), then call the callback and pass the status and bytes read.
5201 *
5202 * @param hw Hardware context.
5203 * @param status Status field from the mbox completion.
5204 * @param mqe Mailbox response structure.
5205 * @param arg Pointer to a callback function that signals the caller that the command is done.
5206 * The callback function prototype is
* <tt>void cb(void *os, int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5208 *
5209 * @return Returns 0.
5210 */
5211 static int32_t
5212 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5213 {
5214
5215 ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5216 ocs_dma_t *payload = NULL;
5217 sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5218 uint32_t bytes_written;
5219
5220 if (cb_arg) {
5221 payload = &(cb_arg->payload);
5222 if (cb_arg->cb) {
5223 mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5224 bytes_written = mbox_rsp->hdr.response_length;
5225 if ((status == 0) && mbox_rsp->hdr.status) {
5226 status = mbox_rsp->hdr.status;
5227 }
5228 cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5229 }
5230
5231 ocs_dma_free(hw->os, &cb_arg->payload);
5232 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5233 }
5234
5235 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5236 return 0;
5237 }
5238
5239 /**
5240 * @ingroup io
5241 * @brief Function to retrieve the SFP information.
5242 *
5243 * @param hw Hardware context.
5244 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5245 * @param cb Function call upon completion of sending the data (may be NULL).
5246 * @param arg Argument to pass to IO completion function.
5247 *
5248 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5249 */
5250 ocs_hw_rtn_e
5251 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5252 {
5253 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5254 ocs_hw_sfp_cb_arg_t *cb_arg;
5255 uint8_t *mbxdata;
5256
5257 /* mbxdata holds the header of the command */
5258 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5259 if (mbxdata == NULL) {
5260 ocs_log_err(hw->os, "failed to malloc mbox\n");
5261 return OCS_HW_RTN_NO_MEMORY;
5262 }
5263
5264 /* cb_arg holds the data that will be passed to the callback on completion */
5265 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5266 if (cb_arg == NULL) {
5267 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5268 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5269 return OCS_HW_RTN_NO_MEMORY;
5270 }
5271
5272 cb_arg->cb = cb;
5273 cb_arg->arg = arg;
5274
5275 /* payload holds the non-embedded portion */
5276 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5277 OCS_MIN_DMA_ALIGNMENT)) {
5278 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5279 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5280 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5281 return OCS_HW_RTN_NO_MEMORY;
5282 }
5283
5284 /* Send the HW command */
5285 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5286 &cb_arg->payload)) {
5287 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5288 }
5289
5290 if (rc != OCS_HW_RTN_SUCCESS) {
5291 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5292 rc);
5293 ocs_dma_free(hw->os, &cb_arg->payload);
5294 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5295 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5296 }
5297
5298 return rc;
5299 }
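
/*
* Example (hypothetical, for illustration only; not compiled): read SFP
* page 0xa0. The callback parameters mirror the invocation in
* ocs_hw_cb_sfp(); the OS handle is assumed to be a void * here.
*/
#if 0
static void
example_sfp_done(void *os, int32_t status, uint32_t bytes_read,
		 uint32_t *page_data, void *arg)
{
	if (status == 0) {
		/* parse bytes_read bytes of page_data */
	}
}

static ocs_hw_rtn_e
example_read_sfp_a0(ocs_hw_t *hw)
{
	return ocs_hw_get_sfp(hw, 0xa0, example_sfp_done, NULL);
}
#endif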
5300
5301 /**
5302 * @brief Function to retrieve the temperature information.
5303 *
5304 * @param hw Hardware context.
5305 * @param cb Function call upon completion of sending the data (may be NULL).
5306 * @param arg Argument to pass to IO completion function.
5307 *
5308 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5309 */
5310 ocs_hw_rtn_e
5311 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5312 {
5313 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5314 ocs_hw_temp_cb_arg_t *cb_arg;
5315 uint8_t *mbxdata;
5316
5317 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5318 if (mbxdata == NULL) {
ocs_log_err(hw->os, "failed to malloc mbox\n");
5320 return OCS_HW_RTN_NO_MEMORY;
5321 }
5322
5323 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5324 if (cb_arg == NULL) {
ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5326 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5327 return OCS_HW_RTN_NO_MEMORY;
5328 }
5329
5330 cb_arg->cb = cb;
5331 cb_arg->arg = arg;
5332
5333 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5334 SLI4_WKI_TAG_SAT_TEM)) {
5335 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5336 }
5337
5338 if (rc != OCS_HW_RTN_SUCCESS) {
5339 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5340 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5341 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5342 }
5343
5344 return rc;
5345 }
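
/*
* Example (hypothetical, for illustration only; not compiled): query the
* adapter temperature. The callback parameters mirror the invocation in
* ocs_hw_cb_temp() below.
*/
#if 0
static void
example_temp_done(int32_t status, uint32_t curr_temp,
		  uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld,
		  uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld,
		  uint32_t fan_on_thrshld, void *arg)
{
	if (status == 0) {
		/* compare curr_temp against the warning/critical thresholds */
	}
}

static ocs_hw_rtn_e
example_get_temperature(ocs_hw_t *hw)
{
	return ocs_hw_get_temperature(hw, example_temp_done, NULL);
}
#endif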
5346
5347 /**
5348 * @brief Called when the DUMP command completes.
5349 *
5350 * @par Description
5351 * Get the temperature data out of the response, free the mailbox that was malloc'd
5352 * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5353 *
5354 * @param hw Hardware context.
5355 * @param status Status field from the mbox completion.
5356 * @param mqe Mailbox response structure.
5357 * @param arg Pointer to a callback function that signals the caller that the command is done.
5358 * The callback function prototype is defined by ocs_hw_temp_cb_t.
5359 *
5360 * @return Returns 0.
5361 */
5362 static int32_t
5363 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5364 {
5365
5366 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5367 ocs_hw_temp_cb_arg_t *cb_arg = arg;
5368 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6 */
5370 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5371 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5372 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */
5373 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */
5374
5375 if (cb_arg) {
5376 if (cb_arg->cb) {
5377 if ((status == 0) && mbox_rsp->hdr.status) {
5378 status = mbox_rsp->hdr.status;
5379 }
5380 cb_arg->cb(status,
5381 curr_temp,
5382 crit_temp_thrshld,
5383 warn_temp_thrshld,
5384 norm_temp_thrshld,
5385 fan_off_thrshld,
5386 fan_on_thrshld,
5387 cb_arg->arg);
5388 }
5389
5390 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5391 }
5392 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5393
5394 return 0;
5395 }
5396
5397 /**
5398 * @brief Function to retrieve the link statistics.
5399 *
5400 * @param hw Hardware context.
5401 * @param req_ext_counters If TRUE, then the extended counters will be requested.
5402 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5403 * @param clear_all_counters If TRUE, the counters will be cleared.
 * @param cb Function to call upon completion of sending the data (may be NULL).
 * @param arg Argument to pass to the IO completion function.
5406 *
5407 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
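 *
 * @par Example
 * A minimal caller-side sketch; the completion handler below is
 * hypothetical and follows the ocs_hw_link_stat_cb_t signature
 * implied by ocs_hw_cb_link_stat():
 *
 * @code
 * static void
 * example_link_stat_cb(int32_t status, uint32_t num_counters,
 *                      ocs_hw_link_stat_counts_t *counters, void *arg)
 * {
 *         if (status == 0) {
 *                 ocs_log_debug(NULL, "link failures=%u\n",
 *                     counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter);
 *         }
 * }
 *
 * rc = ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE,
 *                            example_link_stat_cb, NULL);
 * @endcode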
5408 */
5409 ocs_hw_rtn_e
5410 ocs_hw_get_link_stats(ocs_hw_t *hw,
5411 uint8_t req_ext_counters,
5412 uint8_t clear_overflow_flags,
5413 uint8_t clear_all_counters,
5414 ocs_hw_link_stat_cb_t cb,
5415 void *arg)
5416 {
5417 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5418 ocs_hw_link_stat_cb_arg_t *cb_arg;
5419 uint8_t *mbxdata;
5420
5421 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5422 if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
5424 return OCS_HW_RTN_NO_MEMORY;
5425 }
5426
5427 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5428 if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5430 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5431 return OCS_HW_RTN_NO_MEMORY;
5432 }
5433
5434 cb_arg->cb = cb;
5435 cb_arg->arg = arg;
5436
5437 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5438 req_ext_counters,
5439 clear_overflow_flags,
5440 clear_all_counters)) {
5441 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5442 }
5443
5444 if (rc != OCS_HW_RTN_SUCCESS) {
5445 ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5446 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5447 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5448 }
5449
5450 return rc;
5451 }
5452
5453 /**
5454 * @brief Called when the READ_LINK_STAT command completes.
5455 *
5456 * @par Description
5457 * Get the counters out of the response, free the mailbox that was malloc'd
5458 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5459 *
5460 * @param hw Hardware context.
5461 * @param status Status field from the mbox completion.
5462 * @param mqe Mailbox response structure.
 * @param arg Pointer to the callback argument (ocs_hw_link_stat_cb_arg_t),
 * which holds the callback that signals the caller that the command is done.
 * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5465 *
5466 * @return Returns 0.
5467 */
5468 static int32_t
5469 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_read_link_stats_t *mbox_rsp = (sli4_cmd_read_link_stats_t *)mqe;
5473 ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5474 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);	/* 20 counters when extended counters are granted (gec), else 13 */
5476
5477 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5478 OCS_HW_LINK_STAT_MAX);
5479
5480 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5481 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5482 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5483 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5484 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5485 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5486 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5487 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5488 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5489 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5490 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5491 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5492 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5493 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5494 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5495 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5496 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5497 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5498 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5499 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5500
5501 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5502 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5503 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5504 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5505 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5506 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5507 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5508 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
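	/* Note: the unusual field spellings below ("timout", "bufftor")
	 * match the sli4_cmd_read_link_stats_t definition in sli4.h. */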
5509 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5510 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5511 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5512 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5513 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5514 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5515 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5516 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5517 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5518 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5519 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5520 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5521
5522 if (cb_arg) {
5523 if (cb_arg->cb) {
5524 if ((status == 0) && mbox_rsp->hdr.status) {
5525 status = mbox_rsp->hdr.status;
5526 }
5527 cb_arg->cb(status,
5528 num_counters,
5529 counts,
5530 cb_arg->arg);
5531 }
5532
5533 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5534 }
5535 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5536
5537 return 0;
5538 }
5539
5540 /**
5541 * @brief Function to retrieve the link and host statistics.
5542 *
5543 * @param hw Hardware context.
 * @param cc Clear counters. If TRUE, all counters will be cleared.
 * @param cb Function to call upon completion of receiving the data.
 * @param arg Argument to pass to the completion callback (typically a
 * pointer to an FC host statistics structure).
5547 *
5548 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
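 *
 * @par Example
 * A minimal caller-side sketch; the completion handler below is
 * hypothetical and follows the ocs_hw_host_stat_cb_t signature
 * implied by ocs_hw_cb_host_stat():
 *
 * @code
 * static void
 * example_host_stat_cb(int32_t status, uint32_t num_counters,
 *                      ocs_hw_host_stat_counts_t *counters, void *arg)
 * {
 *         if (status == 0) {
 *                 ocs_log_debug(NULL, "tx frames=%u\n",
 *                     counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter);
 *         }
 * }
 *
 * rc = ocs_hw_get_host_stats(hw, FALSE, example_host_stat_cb, NULL);
 * @endcode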
5549 */
5550 ocs_hw_rtn_e
5551 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5552 {
5553 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5554 ocs_hw_host_stat_cb_arg_t *cb_arg;
5555 uint8_t *mbxdata;
5556
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5558 if (mbxdata == NULL) {
5559 ocs_log_err(hw->os, "failed to malloc mbox");
5560 return OCS_HW_RTN_NO_MEMORY;
5561 }
5562
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), OCS_M_NOWAIT);
5564 if (cb_arg == NULL) {
5565 ocs_log_err(hw->os, "failed to malloc cb_arg");
5566 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5567 return OCS_HW_RTN_NO_MEMORY;
5568 }
5569
5570 cb_arg->cb = cb;
5571 cb_arg->arg = arg;
5572
5573 /* Send the HW command to get the host stats */
5574 if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5575 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5576 }
5577
5578 if (rc != OCS_HW_RTN_SUCCESS) {
5579 ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5580 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5581 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5582 }
5583
5584 return rc;
5585 }
5586
5587 /**
5588 * @brief Called when the READ_STATUS command completes.
5589 *
5590 * @par Description
5591 * Get the counters out of the response, free the mailbox that was malloc'd
5592 * by ocs_hw_get_host_stats(), then call the callback and pass
5593 * the status and data.
5594 *
5595 * @param hw Hardware context.
5596 * @param status Status field from the mbox completion.
5597 * @param mqe Mailbox response structure.
 * @param arg Pointer to the callback argument (ocs_hw_host_stat_cb_arg_t),
 * which holds the callback that signals the caller that the command is done.
 * The callback function prototype is defined by ocs_hw_host_stat_cb_t.
5601 *
5602 * @return Returns 0.
5603 */
5604 static int32_t
5605 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_read_status_t *mbox_rsp = (sli4_cmd_read_status_t *)mqe;
5609 ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5610 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5611 uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5612
5613 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5614 OCS_HW_HOST_STAT_MAX);
5615
5616 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5617 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5618 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5619 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5620 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5621 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5622 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5623 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
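	/* Note: "HOSY" below matches the enum spelling in ocs_hw.h */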
5624 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5625 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5626 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5627 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5628 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5629 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5630
5631 if (cb_arg) {
5632 if (cb_arg->cb) {
5633 if ((status == 0) && mbox_rsp->hdr.status) {
5634 status = mbox_rsp->hdr.status;
5635 }
5636 cb_arg->cb(status,
5637 num_counters,
5638 counts,
5639 cb_arg->arg);
5640 }
5641
5642 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5643 }
5644 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5645
5646 return 0;
5647 }
5648
5649 /**
5650 * @brief HW link configuration enum to the CLP string value mapping.
5651 *
5652 * This structure provides a mapping from the ocs_hw_linkcfg_e
5653 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5654 * control) to the CLP string that is used
5655 * in the DMTF_CLP_CMD mailbox command.
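 *
 * For example, OCS_HW_LINKCFG_2X16G maps to "ELX_2x16G", which
 * ocs_hw_set_linkcfg_lancer() embeds in the CLP command
 * "set / OEMELX_LinkConfig=ELX_2x16G".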
5656 */
5657 typedef struct ocs_hw_linkcfg_map_s {
5658 ocs_hw_linkcfg_e linkcfg;
5659 const char *clp_str;
5660 } ocs_hw_linkcfg_map_t;
5661
5662 /**
5663 * @brief Mapping from the HW linkcfg enum to the CLP command value
5664 * string.
5665 */
5666 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5667 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5668 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5669 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5670 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5671 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5672 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5673 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5674
5675 /**
5676 * @brief HW link configuration enum to Skyhawk link config ID mapping.
5677 *
5678 * This structure provides a mapping from the ocs_hw_linkcfg_e
5679 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5680 * control) to the link config ID numbers used by Skyhawk
5681 */
5682 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5683 ocs_hw_linkcfg_e linkcfg;
5684 uint32_t config_id;
5685 } ocs_hw_skyhawk_linkcfg_map_t;
5686
5687 /**
5688 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5689 */
5690 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5691 {OCS_HW_LINKCFG_4X10G, 0x0a},
5692 {OCS_HW_LINKCFG_1X40G, 0x09},
5693 };
5694
5695 /**
5696 * @brief Helper function for getting the HW linkcfg enum from the CLP
5697 * string value
5698 *
5699 * @param clp_str CLP string value from OEMELX_LinkConfig.
5700 *
5701 * @return Returns the HW linkcfg enum corresponding to clp_str.
5702 */
5703 static ocs_hw_linkcfg_e
5704 ocs_hw_linkcfg_from_clp(const char *clp_str)
5705 {
5706 uint32_t i;
5707 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
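		/* Note: only the first strlen(clp_str) characters are compared,
		 * so an exact entry must precede any longer entry sharing its
		 * prefix in linkcfg_map (e.g. "ELX_2x10G" before
		 * "ELX_2x10G_2x8G"). */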
5708 if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5709 return linkcfg_map[i].linkcfg;
5710 }
5711 }
5712 return OCS_HW_LINKCFG_NA;
5713 }
5714
5715 /**
5716 * @brief Helper function for getting the CLP string value from the HW
5717 * linkcfg enum.
5718 *
5719 * @param linkcfg HW linkcfg enum.
5720 *
 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to the
 * given linkcfg.
5723 */
5724 static const char *
5725 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5726 {
5727 uint32_t i;
5728 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5729 if (linkcfg_map[i].linkcfg == linkcfg) {
5730 return linkcfg_map[i].clp_str;
5731 }
5732 }
5733 return NULL;
5734 }
5735
5736 /**
5737 * @brief Helper function for getting a Skyhawk link config ID from the HW
5738 * linkcfg enum.
5739 *
5740 * @param linkcfg HW linkcfg enum.
5741 *
 * @return Returns the Skyhawk link config ID corresponding to the
 * given linkcfg.
5744 */
5745 static uint32_t
5746 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5747 {
5748 uint32_t i;
5749 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5750 if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5751 return skyhawk_linkcfg_map[i].config_id;
5752 }
5753 }
5754 return 0;
5755 }
5756
5757 /**
5758 * @brief Helper function for getting the HW linkcfg enum from a
5759 * Skyhawk config ID.
5760 *
5761 * @param config_id Skyhawk link config ID.
5762 *
5763 * @return Returns the HW linkcfg enum corresponding to config_id.
5764 */
5765 static ocs_hw_linkcfg_e
5766 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5767 {
5768 uint32_t i;
5769 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5770 if (skyhawk_linkcfg_map[i].config_id == config_id) {
5771 return skyhawk_linkcfg_map[i].linkcfg;
5772 }
5773 }
5774 return OCS_HW_LINKCFG_NA;
5775 }
5776
5777 /**
5778 * @brief Link configuration callback argument.
5779 */
typedef struct ocs_hw_linkcfg_cb_arg_s {
	ocs_hw_port_control_cb_t cb;	/* caller's completion callback */
	void *arg;			/* argument passed to the callback */
	uint32_t opts;			/* OCS_CMD_NOWAIT or OCS_CMD_POLL */
	int32_t status;			/* saved command completion status */
	ocs_dma_t dma_cmd;		/* DMA buffer holding the CLP command */
	ocs_dma_t dma_resp;		/* DMA buffer for the CLP response */
	uint32_t result_len;		/* length of the CLP response data */
} ocs_hw_linkcfg_cb_arg_t;
5789
5790 /**
5791 * @brief Set link configuration.
5792 *
5793 * @param hw Hardware context.
5794 * @param value Link configuration enum to which the link configuration is
5795 * set.
5796 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5797 * @param cb Callback function to invoke following mbx command.
5798 * @param arg Callback argument.
5799 *
5800 * @return Returns OCS_HW_RTN_SUCCESS on success.
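 *
 * @par Example
 * Reached internally through the OCS_HW_PORT_SET_LINK_CONFIG port
 * control; a direct call would look like this sketch, where
 * linkcfg_done is a hypothetical ocs_hw_port_control_cb_t handler:
 *
 * @code
 * rc = ocs_hw_set_linkcfg(hw, OCS_HW_LINKCFG_2X16G, OCS_CMD_NOWAIT,
 *                         linkcfg_done, NULL);
 * @endcode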
5801 */
5802 static ocs_hw_rtn_e
5803 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5804 {
5805 if (!sli_link_is_configurable(&hw->sli)) {
5806 ocs_log_debug(hw->os, "Function not supported\n");
5807 return OCS_HW_RTN_ERROR;
5808 }
5809
5810 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5811 return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5812 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5813 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5814 return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5815 } else {
5816 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5817 return OCS_HW_RTN_ERROR;
5818 }
5819 }
5820
5821 /**
5822 * @brief Set link configuration for Lancer
5823 *
5824 * @param hw Hardware context.
5825 * @param value Link configuration enum to which the link configuration is
5826 * set.
5827 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5828 * @param cb Callback function to invoke following mbx command.
5829 * @param arg Callback argument.
5830 *
5831 * @return Returns OCS_HW_RTN_SUCCESS on success.
5832 */
5833 static ocs_hw_rtn_e
5834 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5835 {
5836 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5837 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5838 const char *value_str = NULL;
5839 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5840
	/* translate ocs_hw_linkcfg_e to CLP string */
	value_str = ocs_hw_clp_from_linkcfg(value);
	if (value_str == NULL) {
		ocs_log_test(hw->os, "no CLP mapping for linkcfg %d\n", value);
		return OCS_HW_RTN_ERROR;
	}
5843
5844 /* allocate memory for callback argument */
5845 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5846 if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5848 return OCS_HW_RTN_NO_MEMORY;
5849 }
5850
5851 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5852 /* allocate DMA for command */
5853 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
		ocs_log_err(hw->os, "dma_cmd alloc failed\n");
5855 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5856 return OCS_HW_RTN_NO_MEMORY;
5857 }
5858 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5859 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5860
5861 /* allocate DMA for response */
5862 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
		ocs_log_err(hw->os, "dma_resp alloc failed\n");
5864 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5865 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5866 return OCS_HW_RTN_NO_MEMORY;
5867 }
5868 cb_arg->cb = cb;
5869 cb_arg->arg = arg;
5870 cb_arg->opts = opts;
5871
5872 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5873 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5874
5875 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5876 /* if failed, or polling, free memory here; if success and not
5877 * polling, will free in callback function
5878 */
5879 if (rc) {
5880 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5881 (char *)cb_arg->dma_cmd.virt);
5882 }
5883 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5884 ocs_dma_free(hw->os, &cb_arg->dma_resp);
5885 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5886 }
5887 return rc;
5888 }
5889
5890 /**
5891 * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5892 *
5893 * @param hw Hardware context.
 * @param status Status from the SET_RECONFIG_LINK_ID command.
5895 * @param mqe Mailbox response structure.
5896 * @param arg Pointer to a callback argument.
5897 *
 * @return None.
5899 */
5900 static void
5901 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5902 {
5903 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5904
5905 if (status) {
5906 ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5907 }
5908
5909 /* invoke callback */
5910 if (cb_arg->cb) {
5911 cb_arg->cb(status, 0, cb_arg->arg);
5912 }
5913
5914 /* if polling, will free memory in calling function */
5915 if (cb_arg->opts != OCS_CMD_POLL) {
5916 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5917 }
5918 }
5919
5920 /**
5921 * @brief Set link configuration for a Skyhawk
5922 *
5923 * @param hw Hardware context.
5924 * @param value Link configuration enum to which the link configuration is
|