FreeBSD/Linux Kernel Cross Reference
sys/dev/ocs_fc/sli4.c
1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34 /**
35 * @defgroup sli SLI-4 Base APIs
36 */
37
38 /**
39 * @file
40 * All common (i.e. transport-independent) SLI-4 functions are implemented
41 * in this file.
42 */
43
44 #include "sli4.h"
45
46 #if defined(OCS_INCLUDE_DEBUG)
47 #include "ocs_utils.h"
48 #endif
49
50 #define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
51 #define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
52
53 static int32_t sli_fw_init(sli4_t *);
54 static int32_t sli_fw_term(sli4_t *);
55 static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian);
56 static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t);
57 static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t);
58 static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *);
59 static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t);
60
61 const uint8_t sli4_fw_initialize[] = {
62 0xff, 0x12, 0x34, 0xff,
63 0xff, 0x56, 0x78, 0xff,
64 };
65
66 const uint8_t sli4_fw_deinitialize[] = {
67 0xff, 0xaa, 0xbb, 0xff,
68 0xff, 0xcc, 0xdd, 0xff,
69 };
70
71 typedef struct {
72 uint32_t rev_id;
73 uint32_t family; /* generation */
74 sli4_asic_type_e type;
75 sli4_asic_rev_e rev;
76 } sli4_asic_entry_t;
77
78 sli4_asic_entry_t sli4_asic_table[] = {
79 { 0x00, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A0},
80 { 0x01, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A1},
81 { 0x02, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A2},
82 { 0x00, 4, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
83 { 0x00, 2, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
84 { 0x10, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_B0},
85 { 0x10, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B0},
86 { 0x11, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B1},
87 { 0x0, 0x0a, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0},
88 { 0x10, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_B0},
89 { 0x30, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_D0},
90 { 0x3, 0x0b, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},
91 { 0x0, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A0},
92 { 0x1, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A1},
93 { 0x3, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},
94 { 0x1, 0x0d, SLI4_ASIC_TYPE_LANCERG7,SLI4_ASIC_REV_A1},
95 { 0x10, 0x0d, SLI4_ASIC_TYPE_LANCERG7,SLI4_ASIC_REV_B0},
96 { 0x00, 0x05, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_REV_A0},
97 };
98
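/*
 * Illustrative sketch (not part of the driver): during setup, the table above
 * is scanned to map the PCI revision ID (sli_pci_rev_id) and SLI family
 * (sli_intf_sli_family) onto an ASIC type/revision. The asic_type/asic_rev
 * destination fields are assumptions here:
 *
 *	uint32_t i;
 *
 *	for (i = 0; i < ARRAY_SIZE(sli4_asic_table); i++) {
 *		if ((sli4_asic_table[i].rev_id == rev_id) &&
 *		    (sli4_asic_table[i].family == family)) {
 *			sli4->asic_type = sli4_asic_table[i].type;
 *			sli4->asic_rev = sli4_asic_table[i].rev;
 *			break;
 *		}
 *	}
 */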
99 /**
100 * @brief Convert queue type enum (SLI_QTYPE_*) into a string.
101 */
102 const char *SLI_QNAME[] = {
103 "Event Queue",
104 "Completion Queue",
105 "Mailbox Queue",
106 "Work Queue",
107 "Receive Queue",
108 "Undefined"
109 };
110
111 /**
112 * @brief Define the mapping of registers to their BAR and offset.
113 *
114 * @par Description
115 * Although SLI-4 specification defines a common set of registers, their locations
116 * (both BAR and offset) depend on the interface type. This array maps a register
117 * enum to an array of BAR/offset pairs indexed by the interface type. For
118 * example, to access the bootstrap mailbox register on an interface type 0
119 * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset.
120 *
121 * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off)
122 * indicates an invalid mapping.
123 */
124 const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = {
125 /* SLI4_REG_BMBX */
126 {
127 { 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG },
128 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX } , { 0, SLI4_BMBX_REG },
129 },
130 /* SLI4_REG_EQCQ_DOORBELL */
131 {
132 { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
133 { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
134 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
135 { 1, SLI4_IF6_EQ_DOORBELL_REG }
136 },
137 	/* SLI4_REG_CQ_DOORBELL */
138 {
139 { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
140 { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
141 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
142 { 1, SLI4_IF6_CQ_DOORBELL_REG }
143 },
144 /* SLI4_REG_FCOE_RQ_DOORBELL */
145 {
146 { 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG },
147 { 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
148 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
149 { 1, SLI4_IF6_RQ_DOORBELL_REG }
150 },
151 /* SLI4_REG_IO_WQ_DOORBELL */
152 {
153 { 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG },
154 { 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
155 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
156 { 1, SLI4_IF6_WQ_DOORBELL_REG }
157 },
158 /* SLI4_REG_MQ_DOORBELL */
159 {
160 { 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
161 { 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
162 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
163 { 1, SLI4_IF6_MQ_DOORBELL_REG }
164 },
165 /* SLI4_REG_PHYSDEV_CONTROL */
166 {
167 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
168 { 0, SLI4_PHSDEV_CONTROL_REG_236 }, { 0, SLI4_PHSDEV_CONTROL_REG_236 },
169 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
170 { 0, SLI4_PHSDEV_CONTROL_REG_236 }
171 },
172 /* SLI4_REG_SLIPORT_CONTROL */
173 {
174 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
175 { 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX },
176 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
177 { 0, SLI4_SLIPORT_CONTROL_REG },
178 },
179 /* SLI4_REG_SLIPORT_ERROR1 */
180 {
181 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
182 { 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX },
183 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
184 { 0, SLI4_SLIPORT_ERROR1 },
185 },
186 /* SLI4_REG_SLIPORT_ERROR2 */
187 {
188 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
189 { 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX },
190 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
191 { 0, SLI4_SLIPORT_ERROR2 },
192 },
193 /* SLI4_REG_SLIPORT_SEMAPHORE */
194 {
195 { 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 },
196 { 0, SLI4_PORT_SEMAPHORE_REG_236 }, { 0, SLI4_PORT_SEMAPHORE_REG_236 },
197 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
198 { 0, SLI4_PORT_SEMAPHORE_REG_236 },
199 },
200 /* SLI4_REG_SLIPORT_STATUS */
201 {
202 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
203 { 0, SLI4_PORT_STATUS_REG_236 }, { 0, SLI4_PORT_STATUS_REG_236 },
204 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
205 { 0, SLI4_PORT_STATUS_REG_236 },
206 },
207 /* SLI4_REG_UERR_MASK_HI */
208 {
209 { 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
210 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
211 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
212 { UINT32_MAX, UINT32_MAX }
213 },
214 /* SLI4_REG_UERR_MASK_LO */
215 {
216 { 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX },
217 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
218 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
219 { UINT32_MAX, UINT32_MAX }
220 },
221 /* SLI4_REG_UERR_STATUS_HI */
222 {
223 { 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
224 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
225 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
226 { UINT32_MAX, UINT32_MAX }
227 },
228 /* SLI4_REG_UERR_STATUS_LO */
229 {
230 { 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX },
231 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
232 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
233 { UINT32_MAX, UINT32_MAX }
234 },
235 /* SLI4_REG_SW_UE_CSR1 */
236 {
237 { 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX },
238 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
239 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
240 { UINT32_MAX, UINT32_MAX }
241 },
242 /* SLI4_REG_SW_UE_CSR2 */
243 {
244 { 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX },
245 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
246 { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
247 { UINT32_MAX, UINT32_MAX }
248 },
249 };
250
251 /**
252 * @brief Read the given SLI register.
253 *
254 * @param sli Pointer to the SLI context.
255 * @param reg Register name enum.
256 *
257 * @return Returns the register value.
258 */
259 uint32_t
260 sli_reg_read(sli4_t *sli, sli4_regname_e reg)
261 {
262 const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
263
264 if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
265 ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
266 return UINT32_MAX;
267 }
268
269 return ocs_reg_read32(sli->os, r->rset, r->off);
270 }
271
272 /**
273 * @brief Write the value to the given SLI register.
274 *
275 * @param sli Pointer to the SLI context.
276 * @param reg Register name enum.
277 * @param val Value to write.
278 *
279 * @return None.
280 */
281 void
282 sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val)
283 {
284 const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
285
286 if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
287 ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
288 return;
289 }
290
291 ocs_reg_write32(sli->os, r->rset, r->off, val);
292 }
293
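/*
 * Illustrative sketch (not part of the driver): callers use the register name
 * enum and let regmap[] resolve the BAR/offset for the active interface type,
 * e.g. (bmbx_value is a placeholder for a value built by the caller):
 *
 *	uint32_t status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
 *
 *	sli_reg_write(sli4, SLI4_REG_BMBX, bmbx_value);
 */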
294 /**
295 * @brief Check if the SLI_INTF register is valid.
296 *
297 * @param val 32-bit SLI_INTF register value.
298 *
299 * @return Returns 0 on success, or a non-zero value on failure.
300 */
301 static uint8_t
302 sli_intf_valid_check(uint32_t val)
303 {
304 return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID;
305 }
306
307 /**
308 * @brief Retrieve the SLI revision level.
309 *
310 * @param val 32-bit SLI_INTF register value.
311 *
312 * @return Returns the SLI revision level.
313 */
314 static uint8_t
315 sli_intf_sli_revision(uint32_t val)
316 {
317 return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK);
318 }
319
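/**
 * @brief Retrieve the SLI family.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI family.
 */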
320 static uint8_t
321 sli_intf_sli_family(uint32_t val)
322 {
323 return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK);
324 }
325
326 /**
327 * @brief Retrieve the SLI interface type.
328 *
329 * @param val 32-bit SLI_INTF register value.
330 *
331 * @return Returns the SLI interface type.
332 */
333 static uint8_t
334 sli_intf_if_type(uint32_t val)
335 {
336 return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK);
337 }
338
339 /**
340 * @brief Retrieve PCI revision ID.
341 *
342 * @param val 32-bit PCI CLASS_REVISION register value.
343 *
344 * @return Returns the PCI revision ID.
345 */
346 static uint8_t
347 sli_pci_rev_id(uint32_t val)
348 {
349 return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK);
350 }
351
352 /**
353  * @brief Retrieve the SLI ASIC generation.
354  *
355  * @param val 32-bit SLI_ASIC_ID register value.
356  *
357  * @return Returns the SLI ASIC generation.
358 */
359 static uint8_t
360 sli_asic_gen(uint32_t val)
361 {
362 return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK);
363 }
364
365 /**
366 * @brief Wait for the bootstrap mailbox to report "ready".
367 *
368 * @param sli4 SLI context pointer.
369 * @param msec Number of milliseconds to wait.
370 *
371  * @return Returns 0 if the BMBX is ready, or non-zero otherwise (i.e., a timeout occurred).
372 */
373 static int32_t
374 sli_bmbx_wait(sli4_t *sli4, uint32_t msec)
375 {
376 uint32_t val = 0;
377
378 do {
379 ocs_udelay(SLI4_BMBX_DELAY_US);
380 val = sli_reg_read(sli4, SLI4_REG_BMBX);
381 msec--;
382 } while(msec && !(val & SLI4_BMBX_RDY));
383
384 return(!(val & SLI4_BMBX_RDY));
385 }
386
387 /**
388 * @brief Write bootstrap mailbox.
389 *
390 * @param sli4 SLI context pointer.
391 *
392 * @return Returns 0 if command succeeded, or non-zero otherwise.
393 */
394 static int32_t
395 sli_bmbx_write(sli4_t *sli4)
396 {
397 uint32_t val = 0;
398
399 /* write buffer location to bootstrap mailbox register */
400 ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE);
401 val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys);
402 sli_reg_write(sli4, SLI4_REG_BMBX, val);
403
404 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
405 ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n");
406 return -1;
407 }
408 val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys);
409 sli_reg_write(sli4, SLI4_REG_BMBX, val);
410
411 /* wait for SLI Port to set ready bit */
412 return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/);
413 }
414
415 #if defined(OCS_INCLUDE_DEBUG)
416 /**
417 * @ingroup sli
418 * @brief Dump BMBX mailbox command.
419 *
420 * @par Description
421 * Convenience function for dumping BMBX mailbox commands. Takes
422 * into account which mailbox command is given since SLI_CONFIG
423 * commands are special.
424 *
425 * @b Note: This function takes advantage of
426 * the one-command-at-a-time nature of the BMBX to be able to
427 * display non-embedded SLI_CONFIG commands. This will not work
428  * for mailbox commands on the MQ. Luckily, all current non-embedded
429 * mailbox commands go through the BMBX.
430 *
431 * @param sli4 SLI context pointer.
432 * @param mbx Pointer to mailbox command to dump.
433 * @param prefix Prefix for dump label.
434 *
435 * @return None.
436 */
437 static void
438 sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix)
439 {
440 uint32_t size = 0;
441 char label[64];
442 uint32_t i;
443 /* Mailbox diagnostic logging */
444 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx;
445
446 if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) {
447 return;
448 }
449
450 if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) {
451 sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr;
452 sli4_req_hdr_t *sli_config_hdr;
453 if (sli_config->emb) {
454 ocs_snprintf(label, sizeof(label), "%s (emb)", prefix);
455
456 /* if embedded, dump entire command */
457 sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed;
458 size = sizeof(*sli_config) - sizeof(sli_config->payload) +
459 sli_config_hdr->request_length + (4*sizeof(uint32_t));
460 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
461 (uint8_t *)sli4->bmbx.virt, size);
462 } else {
463 sli4_sli_config_pmd_t *pmd;
464 ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix);
465
466 /* if non-embedded, break up into two parts: SLI_CONFIG hdr
467 and the payload(s) */
468 size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count);
469 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
470 (uint8_t *)sli4->bmbx.virt, size);
471
472 /* as sanity check, make sure first PMD matches what was saved */
473 pmd = &sli_config->payload.mem;
474 if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) &&
475 (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) {
476 for (i = 0; i < sli_config->pmd_count; i++, pmd++) {
477 sli_config_hdr = sli4->bmbx_non_emb_pmd->virt;
478 ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])",
479 prefix, i);
480 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
481 (uint8_t *)sli4->bmbx_non_emb_pmd->virt,
482 sli_config_hdr->request_length + (4*sizeof(uint32_t)));
483 }
484 } else {
485 ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n",
486 pmd->address_high, pmd->address_low,
487 ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys),
488 ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys));
489 }
490 }
491 } else {
492 /* not an SLI_CONFIG command, just display first 64 bytes, like we do
493 for MQEs */
494 size = 64;
495 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix,
496 (uint8_t *)mbx, size);
497 }
498 }
499 #endif
500
501 /**
502 * @ingroup sli
503 * @brief Submit a command to the bootstrap mailbox and check the status.
504 *
505 * @param sli4 SLI context pointer.
506 *
507 * @return Returns 0 on success, or a non-zero value on failure.
508 */
509 int32_t
510 sli_bmbx_command(sli4_t *sli4)
511 {
512 void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
513
514 #if defined(OCS_INCLUDE_DEBUG)
515 sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd");
516 #endif
517
518 if (sli_fw_error_status(sli4) > 0) {
519 ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox "
520 "command rejected status=%#x error1=%#x error2=%#x\n",
521 sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
522 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
523 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
524 return -1;
525 }
526
527 if (sli_bmbx_write(sli4)) {
528 ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n",
529 (void*)sli4->bmbx.phys,
530 sli_reg_read(sli4, SLI4_REG_BMBX));
531 return -1;
532 }
533
534 /* check completion queue entry status */
535 ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD);
536 if (((sli4_mcqe_t *)cqe)->val) {
537 #if defined(OCS_INCLUDE_DEBUG)
538 sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl");
539 ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t));
540 #endif
541 return sli_cqe_mq(cqe);
542 } else {
543 ocs_log_err(sli4->os, "invalid or wrong type\n");
544 return -1;
545 }
546 }
547
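/*
 * Illustrative sketch (not part of the driver): a mailbox command is staged
 * in the bootstrap mailbox DMA buffer by one of the sli_cmd_* writers and
 * then submitted with sli_bmbx_command(); the response is read back from the
 * same buffer. SLI4_BMBX_SIZE is assumed to bound the command area:
 *
 *	if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE) &&
 *	    (sli_bmbx_command(sli4) == 0)) {
 *		... the READ_CONFIG response is now in sli4->bmbx.virt ...
 *	}
 */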
548 /****************************************************************************
549 * Messages
550 */
551
552 /**
553 * @ingroup sli
554 * @brief Write a CONFIG_LINK command to the provided buffer.
555 *
556 * @param sli4 SLI context pointer.
557 * @param buf Virtual pointer to the destination buffer.
558 * @param size Buffer size, in bytes.
559 *
560 * @return Returns the number of bytes written.
561 */
562 int32_t
563 sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size)
564 {
565 sli4_cmd_config_link_t *config_link = buf;
566
567 ocs_memset(buf, 0, size);
568
569 config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK;
570
571 /* Port interprets zero in a field as "use default value" */
572
573 return sizeof(sli4_cmd_config_link_t);
574 }
575
576 /**
577 * @ingroup sli
578 * @brief Write a DOWN_LINK command to the provided buffer.
579 *
580 * @param sli4 SLI context pointer.
581 * @param buf Virtual pointer to the destination buffer.
582 * @param size Buffer size, in bytes.
583 *
584 * @return Returns the number of bytes written.
585 */
586 int32_t
587 sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size)
588 {
589 sli4_mbox_command_header_t *hdr = buf;
590
591 ocs_memset(buf, 0, size);
592
593 hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK;
594
595 /* Port interprets zero in a field as "use default value" */
596
597 return sizeof(sli4_mbox_command_header_t);
598 }
599
600 /**
601 * @ingroup sli
602 * @brief Write a DUMP Type 4 command to the provided buffer.
603 *
604 * @param sli4 SLI context pointer.
605 * @param buf Virtual pointer to the destination buffer.
606 * @param size Buffer size, in bytes.
607 * @param wki The well known item ID.
608 *
609 * @return Returns the number of bytes written.
610 */
611 int32_t
612 sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki)
613 {
614 sli4_cmd_dump4_t *cmd = buf;
615
616 ocs_memset(buf, 0, size);
617
618 cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP;
619 cmd->type = 4;
620 cmd->wki_selection = wki;
621 return sizeof(sli4_cmd_dump4_t);
622 }
623
624 /**
625 * @ingroup sli
626 * @brief Write a COMMON_READ_TRANSCEIVER_DATA command.
627 *
628 * @param sli4 SLI context.
629 * @param buf Destination buffer for the command.
630 * @param size Buffer size, in bytes.
631 * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2).
632 * @param dma DMA structure from which the data will be copied.
633 *
634 * @note This creates a Version 0 message.
635 *
636 * @return Returns the number of bytes written.
637 */
638 int32_t
639 sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num,
640 ocs_dma_t *dma)
641 {
642 sli4_req_common_read_transceiver_data_t *req = NULL;
643 uint32_t sli_config_off = 0;
644 uint32_t payload_size;
645
646 if (dma == NULL) {
647 /* Payload length must accommodate both request and response */
648 payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t),
649 sizeof(sli4_res_common_read_transceiver_data_t));
650 } else {
651 payload_size = dma->size;
652 }
653
654 if (sli4->port_type == SLI4_PORT_TYPE_FC) {
655 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma);
656 }
657
658 if (dma == NULL) {
659 req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off);
660 } else {
661 req = (sli4_req_common_read_transceiver_data_t *)dma->virt;
662 ocs_memset(req, 0, dma->size);
663 }
664
665 req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA;
666 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
667 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
668
669 req->page_number = page_num;
670 req->port = sli4->physical_port;
671
672 return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t));
673 }
674
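/*
 * Illustrative sketch (not part of the driver): reading SFP page 0xa0 into a
 * caller-provided, already-mapped DMA buffer (dma is an assumption here):
 *
 *	sli_cmd_common_read_transceiver_data(sli4, sli4->bmbx.virt,
 *					     SLI4_BMBX_SIZE, 0xa0, &dma);
 *	sli_bmbx_command(sli4);
 */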
675 /**
676 * @ingroup sli
677 * @brief Write a READ_LINK_STAT command to the provided buffer.
678 *
679 * @param sli4 SLI context pointer.
680 * @param buf Virtual pointer to the destination buffer.
681 * @param size Buffer size, in bytes.
682 * @param req_ext_counters If TRUE, then the extended counters will be requested.
683 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
684 * @param clear_all_counters If TRUE, the counters will be cleared.
685 *
686 * @return Returns the number of bytes written.
687 */
688 int32_t
689 sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size,
690 uint8_t req_ext_counters,
691 uint8_t clear_overflow_flags,
692 uint8_t clear_all_counters)
693 {
694 sli4_cmd_read_link_stats_t *cmd = buf;
695
696 ocs_memset(buf, 0, size);
697
698 cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT;
699 cmd->rec = req_ext_counters;
700 cmd->clrc = clear_all_counters;
701 cmd->clof = clear_overflow_flags;
702 return sizeof(sli4_cmd_read_link_stats_t);
703 }
704
705 /**
706 * @ingroup sli
707 * @brief Write a READ_STATUS command to the provided buffer.
708 *
709 * @param sli4 SLI context pointer.
710 * @param buf Virtual pointer to the destination buffer.
711 * @param size Buffer size, in bytes.
712 * @param clear_counters If TRUE, the counters will be cleared.
713 *
714 * @return Returns the number of bytes written.
715 */
716 int32_t
717 sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size,
718 uint8_t clear_counters)
719 {
720 sli4_cmd_read_status_t *cmd = buf;
721
722 ocs_memset(buf, 0, size);
723
724 cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS;
725 cmd->cc = clear_counters;
726 return sizeof(sli4_cmd_read_status_t);
727 }
728
729 /**
730 * @brief Write a FW_DEINITIALIZE command to the provided buffer.
731 *
732 * @param sli4 SLI context pointer.
733 * @param buf Virtual pointer to the destination buffer.
734 * @param size Buffer size, in bytes.
735 *
736 * @return Returns the number of bytes written.
737 */
738 static int32_t
739 sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size)
740 {
741
742 ocs_memset(buf, 0, size);
743 ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize));
744
745 return sizeof(sli4_fw_deinitialize);
746 }
747
748 /**
749 * @brief Write a FW_INITIALIZE command to the provided buffer.
750 *
751 * @param sli4 SLI context pointer.
752 * @param buf Virtual pointer to the destination buffer.
753 * @param size Buffer size, in bytes.
754 *
755 * @return Returns the number of bytes written.
756 */
757 static int32_t
758 sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size)
759 {
760
761 ocs_memset(buf, 0, size);
762 ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize));
763
764 return sizeof(sli4_fw_initialize);
765 }
766
767 /**
768 * @ingroup sli
769  * @brief Update the INIT_LINK flags with the SLI config topology.
770  *
771  * @param sli4 SLI context pointer.
772  * @param init_link Pointer to the INIT_LINK command.
773  *
774  * @return Returns 0 on success, or -1 on failure.
775 */
776 static int32_t
777 sli4_set_link_flags_config_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link)
778 {
779
780 switch (sli4->config.topology) {
781 case SLI4_READ_CFG_TOPO_FC:
782 		/* Attempt P2P but failover to FC-AL */
783 init_link->link_flags.enable_topology_failover = TRUE;
784 init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
785 break;
786 case SLI4_READ_CFG_TOPO_FC_AL:
787 init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY;
788 return (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code));
789
790 case SLI4_READ_CFG_TOPO_FC_DA:
791 init_link->link_flags.topology = FC_TOPOLOGY_P2P;
792 break;
793 default:
794 ocs_log_err(sli4->os, "unsupported topology %#x\n", sli4->config.topology);
795 return -1;
796 }
797
798 return 0;
799 }
800
801 /**
802 * @ingroup sli
803  * @brief Update the INIT_LINK flags with the persistent topology.
804  * PT stores the value in a compatible form, so it is assigned directly to link_flags.
805  *
806  * @param sli4 SLI context pointer.
807  * @param init_link Pointer to the INIT_LINK command.
808  *
809  * @return Returns 0 on success, or -1 on failure.
810 */
811 static int32_t
812 sli4_set_link_flags_persistent_topo(sli4_t *sli4, sli4_cmd_init_link_t *init_link)
813 {
814 if ((sli4->config.pt == SLI4_INIT_LINK_F_FCAL_ONLY) &&
815 (!sli_fcal_is_speed_supported(init_link->link_speed_selection_code)))
816 return -1;
817
818 init_link->link_flags.enable_topology_failover = sli4->config.tf;
819 init_link->link_flags.topology = sli4->config.pt;
820
821 return 0;
822 }
823
824 /**
825 * @ingroup sli
826 * @brief Write an INIT_LINK command to the provided buffer.
827 *
828 * @param sli4 SLI context pointer.
829 * @param buf Virtual pointer to the destination buffer.
830 * @param size Buffer size, in bytes.
831 * @param speed Link speed.
832 * @param reset_alpa For native FC, this is the selective reset AL_PA
833 *
834 * @return Returns the number of bytes written.
835 */
836 int32_t
837 sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa)
838 {
839 sli4_cmd_init_link_t *init_link = buf;
840 int32_t rc = 0;
841
842 ocs_memset(buf, 0, size);
843
844 init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK;
845
846 /* Most fields only have meaning for FC links */
847 if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) {
848 init_link->selective_reset_al_pa = reset_alpa;
849 init_link->link_flags.loopback = FALSE;
850
851 init_link->link_speed_selection_code = speed;
852 switch (speed) {
853 case FC_LINK_SPEED_1G:
854 case FC_LINK_SPEED_2G:
855 case FC_LINK_SPEED_4G:
856 case FC_LINK_SPEED_8G:
857 case FC_LINK_SPEED_16G:
858 case FC_LINK_SPEED_32G:
859 init_link->link_flags.fixed_speed = TRUE;
860 break;
861 case FC_LINK_SPEED_10G:
862 ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed);
863 return 0;
864 }
865
866 init_link->link_flags.unfair = FALSE;
867 init_link->link_flags.skip_lirp_lilp = FALSE;
868 init_link->link_flags.gen_loop_validity_check = FALSE;
869 init_link->link_flags.skip_lisa = FALSE;
870 init_link->link_flags.select_hightest_al_pa = FALSE;
871
872 		/* Update the topology in the link flags for link bring-up */
873 		ocs_log_info(sli4->os, "bring up link with topology: %d, PTV: %d, TF: %d, PT: %d\n",
874 				sli4->config.topology, sli4->config.ptv, sli4->config.tf, sli4->config.pt);
875 if (sli4->config.ptv)
876 rc = sli4_set_link_flags_persistent_topo(sli4, init_link);
877 else
878 rc = sli4_set_link_flags_config_topo(sli4, init_link);
879
880 }
881
882 return rc ? 0 : sizeof(sli4_cmd_init_link_t);
883 }
884
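/*
 * Illustrative sketch (not part of the driver): bringing the link up via the
 * bootstrap mailbox; the speed value is only an example:
 *
 *	if (sli_cmd_init_link(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *			      FC_LINK_SPEED_16G, 0)) {
 *		sli_bmbx_command(sli4);
 *	}
 */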
885 /**
886 * @ingroup sli
887 * @brief Write an INIT_VFI command to the provided buffer.
888 *
889 * @param sli4 SLI context pointer.
890 * @param buf Virtual pointer to the destination buffer.
891 * @param size Buffer size, in bytes.
892 * @param vfi VFI
893 * @param fcfi FCFI
894 * @param vpi VPI (Set to -1 if unused.)
895 *
896 * @return Returns the number of bytes written.
897 */
898 int32_t
899 sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi,
900 uint16_t fcfi, uint16_t vpi)
901 {
902 sli4_cmd_init_vfi_t *init_vfi = buf;
903
904 ocs_memset(buf, 0, size);
905
906 init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI;
907
908 init_vfi->vfi = vfi;
909 init_vfi->fcfi = fcfi;
910
911 /*
912 * If the VPI is valid, initialize it at the same time as
913 * the VFI
914 */
915 if (0xffff != vpi) {
916 init_vfi->vp = TRUE;
917 init_vfi->vpi = vpi;
918 }
919
920 return sizeof(sli4_cmd_init_vfi_t);
921 }
922
923 /**
924 * @ingroup sli
925 * @brief Write an INIT_VPI command to the provided buffer.
926 *
927 * @param sli4 SLI context pointer.
928 * @param buf Virtual pointer to the destination buffer.
929 * @param size Buffer size, in bytes.
930 * @param vpi VPI allocated.
931 * @param vfi VFI associated with this VPI.
932 *
933 * @return Returns the number of bytes written.
934 */
935 int32_t
936 sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi)
937 {
938 sli4_cmd_init_vpi_t *init_vpi = buf;
939
940 ocs_memset(buf, 0, size);
941
942 init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI;
943 init_vpi->vpi = vpi;
944 init_vpi->vfi = vfi;
945
946 return sizeof(sli4_cmd_init_vpi_t);
947 }
948
949 /**
950 * @ingroup sli
951 * @brief Write a POST_XRI command to the provided buffer.
952 *
953 * @param sli4 SLI context pointer.
954 * @param buf Virtual pointer to the destination buffer.
955 * @param size Buffer size, in bytes.
956  * @param xri_base Starting XRI value for the range of XRIs given to the SLI Port.
957 * @param xri_count Number of XRIs provided to the SLI Port.
958 *
959 * @return Returns the number of bytes written.
960 */
961 int32_t
962 sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count)
963 {
964 sli4_cmd_post_xri_t *post_xri = buf;
965
966 ocs_memset(buf, 0, size);
967
968 post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI;
969 post_xri->xri_base = xri_base;
970 post_xri->xri_count = xri_count;
971
972 if (sli4->config.auto_xfer_rdy == 0) {
973 post_xri->enx = TRUE;
974 post_xri->val = TRUE;
975 }
976
977 return sizeof(sli4_cmd_post_xri_t);
978 }
979
980 /**
981 * @ingroup sli
982 * @brief Write a RELEASE_XRI command to the provided buffer.
983 *
984 * @param sli4 SLI context pointer.
985 * @param buf Virtual pointer to the destination buffer.
986 * @param size Buffer size, in bytes.
987 * @param num_xri The number of XRIs to be released.
988 *
989 * @return Returns the number of bytes written.
990 */
991 int32_t
992 sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri)
993 {
994 sli4_cmd_release_xri_t *release_xri = buf;
995
996 ocs_memset(buf, 0, size);
997
998 release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI;
999 release_xri->xri_count = num_xri;
1000
1001 return sizeof(sli4_cmd_release_xri_t);
1002 }
1003
1004 /**
1005 * @brief Write a READ_CONFIG command to the provided buffer.
1006 *
1007 * @param sli4 SLI context pointer.
1008 * @param buf Virtual pointer to the destination buffer.
1009  * @param size Buffer size, in bytes.
1010 *
1011 * @return Returns the number of bytes written.
1012 */
1013 static int32_t
1014 sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size)
1015 {
1016 sli4_cmd_read_config_t *read_config = buf;
1017
1018 ocs_memset(buf, 0, size);
1019
1020 read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG;
1021
1022 return sizeof(sli4_cmd_read_config_t);
1023 }
1024
1025 /**
1026 * @brief Write a READ_NVPARMS command to the provided buffer.
1027 *
1028 * @param sli4 SLI context pointer.
1029 * @param buf Virtual pointer to the destination buffer.
1030 * @param size Buffer size, in bytes.
1031 *
1032 * @return Returns the number of bytes written.
1033 */
1034 int32_t
1035 sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size)
1036 {
1037 sli4_cmd_read_nvparms_t *read_nvparms = buf;
1038
1039 ocs_memset(buf, 0, size);
1040
1041 read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS;
1042
1043 return sizeof(sli4_cmd_read_nvparms_t);
1044 }
1045
1046 /**
1047 * @brief Write a WRITE_NVPARMS command to the provided buffer.
1048 *
1049 * @param sli4 SLI context pointer.
1050 * @param buf Virtual pointer to the destination buffer.
1051 * @param size Buffer size, in bytes.
1052 * @param wwpn WWPN to write - pointer to array of 8 uint8_t.
1053 * @param wwnn WWNN to write - pointer to array of 8 uint8_t.
1054 * @param hard_alpa Hard ALPA to write.
1055 * @param preferred_d_id Preferred D_ID to write.
1056 *
1057 * @return Returns the number of bytes written.
1058 */
1059 int32_t
1060 sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
1061 uint32_t preferred_d_id)
1062 {
1063 sli4_cmd_write_nvparms_t *write_nvparms = buf;
1064
1065 ocs_memset(buf, 0, size);
1066
1067 write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS;
1068 ocs_memcpy(write_nvparms->wwpn, wwpn, 8);
1069 ocs_memcpy(write_nvparms->wwnn, wwnn, 8);
1070 write_nvparms->hard_alpa = hard_alpa;
1071 write_nvparms->preferred_d_id = preferred_d_id;
1072
1073 return sizeof(sli4_cmd_write_nvparms_t);
1074 }
1075
1076 /**
1077 * @brief Write a READ_REV command to the provided buffer.
1078 *
1079 * @param sli4 SLI context pointer.
1080 * @param buf Virtual pointer to the destination buffer.
1081 * @param size Buffer size, in bytes.
1082 * @param vpd Pointer to the buffer.
1083 *
1084 * @return Returns the number of bytes written.
1085 */
1086 static int32_t
1087 sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd)
1088 {
1089 sli4_cmd_read_rev_t *read_rev = buf;
1090
1091 ocs_memset(buf, 0, size);
1092
1093 read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV;
1094
1095 if (vpd && vpd->size) {
1096 read_rev->vpd = TRUE;
1097
1098 read_rev->available_length = vpd->size;
1099
1100 read_rev->physical_address_low = ocs_addr32_lo(vpd->phys);
1101 read_rev->physical_address_high = ocs_addr32_hi(vpd->phys);
1102 }
1103
1104 return sizeof(sli4_cmd_read_rev_t);
1105 }
1106
1107 /**
1108 * @ingroup sli
1109 * @brief Write a READ_SPARM64 command to the provided buffer.
1110 *
1111 * @param sli4 SLI context pointer.
1112 * @param buf Virtual pointer to the destination buffer.
1113 * @param size Buffer size, in bytes.
1114 * @param dma DMA buffer for the service parameters.
1115 * @param vpi VPI used to determine the WWN.
1116 *
1117  * @return Returns the number of bytes written, or -1 on error.
1118 */
1119 int32_t
1120 sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
1121 uint16_t vpi)
1122 {
1123 sli4_cmd_read_sparm64_t *read_sparm64 = buf;
1124
1125 ocs_memset(buf, 0, size);
1126
1127 if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) {
1128 ocs_log_test(sli4->os, "special VPI not supported!!!\n");
1129 return -1;
1130 }
1131
1132 if (!dma || !dma->phys) {
1133 ocs_log_test(sli4->os, "bad DMA buffer\n");
1134 return -1;
1135 }
1136
1137 read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64;
1138
1139 read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
1140 read_sparm64->bde_64.buffer_length = dma->size;
1141 read_sparm64->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1142 read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1143
1144 read_sparm64->vpi = vpi;
1145
1146 return sizeof(sli4_cmd_read_sparm64_t);
1147 }
1148
1149 /**
1150 * @ingroup sli
1151 * @brief Write a READ_TOPOLOGY command to the provided buffer.
1152 *
1153 * @param sli4 SLI context pointer.
1154 * @param buf Virtual pointer to the destination buffer.
1155 * @param size Buffer size, in bytes.
1156 * @param dma DMA buffer for loop map (optional).
1157 *
1158 * @return Returns the number of bytes written.
1159 */
1160 int32_t
1161 sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
1162 {
1163 sli4_cmd_read_topology_t *read_topo = buf;
1164
1165 ocs_memset(buf, 0, size);
1166
1167 read_topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY;
1168
1169 if (dma && dma->size) {
1170 if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
1171 ocs_log_test(sli4->os, "loop map buffer too small %jd\n",
1172 dma->size);
1173 return 0;
1174 }
1175
1176 ocs_memset(dma->virt, 0, dma->size);
1177
1178 read_topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64;
1179 read_topo->bde_loop_map.buffer_length = dma->size;
1180 read_topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1181 read_topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1182 }
1183
1184 return sizeof(sli4_cmd_read_topology_t);
1185 }
1186
1187 /**
1188 * @ingroup sli
1189 * @brief Write a REG_FCFI command to the provided buffer.
1190 *
1191 * @param sli4 SLI context pointer.
1192 * @param buf Virtual pointer to the destination buffer.
1193 * @param size Buffer size, in bytes.
1194 * @param index FCF index returned by READ_FCF_TABLE.
1195 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information
1196 * @param vlan_id VLAN ID tag.
1197 *
1198 * @return Returns the number of bytes written.
1199 */
1200 int32_t
1201 sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id)
1202 {
1203 sli4_cmd_reg_fcfi_t *reg_fcfi = buf;
1204 uint32_t i;
1205
1206 ocs_memset(buf, 0, size);
1207
1208 reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI;
1209
1210 reg_fcfi->fcf_index = index;
1211
1212 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1213 switch(i) {
1214 case 0: reg_fcfi->rq_id_0 = rq_cfg[0].rq_id; break;
1215 case 1: reg_fcfi->rq_id_1 = rq_cfg[1].rq_id; break;
1216 case 2: reg_fcfi->rq_id_2 = rq_cfg[2].rq_id; break;
1217 case 3: reg_fcfi->rq_id_3 = rq_cfg[3].rq_id; break;
1218 }
1219 reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
1220 reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
1221 reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
1222 reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
1223 }
1224
1225 if (vlan_id) {
1226 reg_fcfi->vv = TRUE;
1227 reg_fcfi->vlan_tag = vlan_id;
1228 }
1229
1230 return sizeof(sli4_cmd_reg_fcfi_t);
1231 }
1232
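/*
 * Illustrative sketch (not part of the driver): callers fill an rq_cfg[]
 * array (RQ id plus R_CTL/TYPE mask and match per entry) and register the
 * FCF; fcf_index and rq_cfg are assumed to be prepared by the caller:
 *
 *	sli_cmd_reg_fcfi(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *			 fcf_index, rq_cfg, 0);
 *	sli_bmbx_command(sli4);
 */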
1233 /**
1234  * @brief Write a REG_FCFI_MRQ command to the provided buffer.
1235 *
1236 * @param sli4 SLI context pointer.
1237 * @param buf Virtual pointer to the destination buffer.
1238 * @param size Buffer size, in bytes.
1239  * @param mode Registration mode (SLI4_CMD_REG_FCFI_SET_FCFI_MODE registers the FCFI; other modes configure the MRQ pairs).
1240  * @param fcf_index FCF index returned by READ_FCF_TABLE.
1241  * @param vlan_id VLAN ID tag.
1242  * @param rq_selection_policy RQ selection policy.
1243  * @param mrq_bit_mask MRQ filter bitmask.
1244  * @param num_mrqs Number of MRQ pairs.
1245  * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
1246  *
1247  * @return Returns the number of bytes written.
1248 */
1249 int32_t
1250 sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode,
1251 uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy,
1252 uint8_t mrq_bit_mask, uint16_t num_mrqs,
1253 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG])
1254 {
1255 sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf;
1256 uint32_t i;
1257
1258 ocs_memset(buf, 0, size);
1259
1260 reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ;
1261 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1262 reg_fcfi_mrq->fcf_index = fcf_index;
1263 if (vlan_id) {
1264 reg_fcfi_mrq->vv = TRUE;
1265 reg_fcfi_mrq->vlan_tag = vlan_id;
1266 }
1267 goto done;
1268 }
1269
1270 reg_fcfi_mrq->mode = mode;
1271 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1272 reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
1273 reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
1274 reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
1275 reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
1276
1277 switch(i) {
1278 case 3: reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id; break;
1279 case 2: reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id; break;
1280 case 1: reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id; break;
1281 case 0: reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id; break;
1282 }
1283 }
1284
1285 reg_fcfi_mrq->rq_selection_policy = rq_selection_policy;
1286 reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask;
1287 reg_fcfi_mrq->num_mrq_pairs = num_mrqs;
1288 done:
1289 return sizeof(sli4_cmd_reg_fcfi_mrq_t);
1290 }
1291
1292 /**
1293 * @ingroup sli
1294 * @brief Write a REG_RPI command to the provided buffer.
1295 *
1296 * @param sli4 SLI context pointer.
1297 * @param buf Virtual pointer to the destination buffer.
1298 * @param size Buffer size, in bytes.
1299 * @param nport_id Remote F/N_Port_ID.
1300 * @param rpi Previously-allocated Remote Port Indicator.
1301 * @param vpi Previously-allocated Virtual Port Indicator.
1302 * @param dma DMA buffer that contains the remote port's service parameters.
1303  * @param update Boolean indicating an update to an existing RPI (TRUE) or a new registration (FALSE).
1304  * @param enable_t10_pi If TRUE, enable T10 PI for this RPI (sets the etow flag).
1305 *
1306 * @return Returns the number of bytes written.
1307 */
1308 int32_t
1309 sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi,
1310 uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi)
1311 {
1312 sli4_cmd_reg_rpi_t *reg_rpi = buf;
1313
1314 ocs_memset(buf, 0, size);
1315
1316 reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI;
1317
1318 reg_rpi->rpi = rpi;
1319 reg_rpi->remote_n_port_id = nport_id;
1320 reg_rpi->upd = update;
1321 reg_rpi->etow = enable_t10_pi;
1322
1323 reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
1324 reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN;
1325 reg_rpi->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
1326 reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
1327
1328 reg_rpi->vpi = vpi;
1329
1330 return sizeof(sli4_cmd_reg_rpi_t);
1331 }
1332
1333 /**
1334 * @ingroup sli
1335 * @brief Write a REG_VFI command to the provided buffer.
1336 *
1337 * @param sli4 SLI context pointer.
1338 * @param buf Virtual pointer to the destination buffer.
1339 * @param size Buffer size, in bytes.
1340 * @param domain Pointer to the domain object.
1341 *
1342 * @return Returns the number of bytes written.
1343 */
1344 int32_t
1345 sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain)
1346 {
1347 sli4_cmd_reg_vfi_t *reg_vfi = buf;
1348
1349 if (!sli4 || !buf || !domain) {
1350 return 0;
1351 }
1352
1353 ocs_memset(buf, 0, size);
1354
1355 reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI;
1356
1357 reg_vfi->vfi = domain->indicator;
1358
1359 reg_vfi->fcfi = domain->fcf_indicator;
1360
1361 /* TODO contents of domain->dma only valid if topo == FABRIC */
1362 reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64;
1363 reg_vfi->sparm.buffer_length = 0x70;
1364 reg_vfi->sparm.u.data.buffer_address_low = ocs_addr32_lo(domain->dma.phys);
1365 reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys);
1366
1367 reg_vfi->e_d_tov = sli4->config.e_d_tov;
1368 reg_vfi->r_a_tov = sli4->config.r_a_tov;
1369
1370 reg_vfi->vp = TRUE;
1371 reg_vfi->vpi = domain->sport->indicator;
1372 ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn));
1373 reg_vfi->local_n_port_id = domain->sport->fc_id;
1374
1375 return sizeof(sli4_cmd_reg_vfi_t);
1376 }
1377
1378 /**
1379 * @ingroup sli
1380 * @brief Write a REG_VPI command to the provided buffer.
1381 *
1382 * @param sli4 SLI context pointer.
1383 * @param buf Virtual pointer to the destination buffer.
1384 * @param size Buffer size, in bytes.
1385 * @param sport Point to SLI Port object.
1386 * @param update Boolean indicating whether to update the existing VPI (true)
1387 * or create a new VPI (false).
1388 *
1389 * @return Returns the number of bytes written.
1390 */
1391 int32_t
1392 sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update)
1393 {
1394 sli4_cmd_reg_vpi_t *reg_vpi = buf;
1395
1396 if (!sli4 || !buf || !sport) {
1397 return 0;
1398 }
1399
1400 ocs_memset(buf, 0, size);
1401
1402 reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI;
1403
1404 reg_vpi->local_n_port_id = sport->fc_id;
1405 reg_vpi->upd = update != 0;
1406 ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn));
1407 reg_vpi->vpi = sport->indicator;
1408 reg_vpi->vfi = sport->domain->indicator;
1409
1410 return sizeof(sli4_cmd_reg_vpi_t);
1411 }
1412
1413 /**
1414 * @brief Write a REQUEST_FEATURES command to the provided buffer.
1415 *
1416 * @param sli4 SLI context pointer.
1417 * @param buf Virtual pointer to the destination buffer.
1418 * @param size Buffer size, in bytes.
1419 * @param mask Features to request.
1420 * @param query Use feature query mode (does not change FW).
1421 *
1422 * @return Returns the number of bytes written.
1423 */
1424 static int32_t
1425 sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query)
1426 {
1427 sli4_cmd_request_features_t *features = buf;
1428
1429 ocs_memset(buf, 0, size);
1430
1431 features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES;
1432
1433 if (query) {
1434 features->qry = TRUE;
1435 }
1436 features->command.dword = mask.dword;
1437
1438 return sizeof(sli4_cmd_request_features_t);
1439 }
1440
1441 /**
1442 * @ingroup sli
1443 * @brief Write a SLI_CONFIG command to the provided buffer.
1444 *
1445 * @param sli4 SLI context pointer.
1446 * @param buf Virtual pointer to the destination buffer.
1447 * @param size Buffer size, in bytes.
1448 * @param length Length in bytes of attached command.
1449 * @param dma DMA buffer for non-embedded commands.
1450 *
1451  * @return Returns the number of bytes written, or -1 on error.
1452 */
1453 int32_t
1454 sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma)
1455 {
1456 sli4_cmd_sli_config_t *sli_config = NULL;
1457
1458 if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) {
1459 ocs_log_test(sli4->os, "length(%d) > payload(%ld)\n",
1460 length, sizeof(sli_config->payload.embed));
1461 return -1;
1462 }
1463
1464 sli_config = buf;
1465
1466 ocs_memset(buf, 0, size);
1467
1468 sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG;
1469 if (NULL == dma) {
1470 sli_config->emb = TRUE;
1471 sli_config->payload_length = length;
1472 } else {
1473 sli_config->emb = FALSE;
1474
1475 sli_config->pmd_count = 1;
1476
1477 sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
1478 sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
1479 sli_config->payload.mem.length = dma->size;
1480 sli_config->payload_length = dma->size;
1481 #if defined(OCS_INCLUDE_DEBUG)
1482 /* save pointer to DMA for BMBX dumping purposes */
1483 sli4->bmbx_non_emb_pmd = dma;
1484 #endif
1485 }
1486
1487 return offsetof(sli4_cmd_sli_config_t, payload.embed);
1488 }
1489
1490 /**
1491 * @brief Initialize SLI Port control register.
1492 *
1493 * @param sli4 SLI context pointer.
1494 * @param endian Endian value to write.
1495 *
1496 * @return Returns 0 on success, or a negative error code value on failure.
1497 */
1498
1499 static int32_t
1500 sli_sliport_control(sli4_t *sli4, uint32_t endian)
1501 {
1502 uint32_t iter;
1503 int32_t rc;
1504
1505 rc = -1;
1506
1507 /* Initialize port, endian */
1508 sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP);
1509
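	/* Poll for up to ~30 seconds (3000 iterations x 10 ms) for the port to report ready */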
1510 for (iter = 0; iter < 3000; iter ++) {
1511 ocs_udelay(SLI4_INIT_PORT_DELAY_US);
1512 if (sli_fw_ready(sli4) == 1) {
1513 rc = 0;
1514 break;
1515 }
1516 }
1517
1518 if (rc != 0) {
1519 ocs_log_crit(sli4->os, "port failed to become ready after initialization\n");
1520 }
1521
1522 return rc;
1523 }
1524
1525 /**
1526 * @ingroup sli
1527 * @brief Write a UNREG_FCFI command to the provided buffer.
1528 *
1529 * @param sli4 SLI context pointer.
1530 * @param buf Virtual pointer to the destination buffer.
1531 * @param size Buffer size, in bytes.
1532  * @param indicator FCFI to unregister.
1533 *
1534 * @return Returns the number of bytes written.
1535 */
1536 int32_t
1537 sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator)
1538 {
1539 sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf;
1540
1541 if (!sli4 || !buf) {
1542 return 0;
1543 }
1544
1545 ocs_memset(buf, 0, size);
1546
1547 unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI;
1548
1549 unreg_fcfi->fcfi = indicator;
1550
1551 return sizeof(sli4_cmd_unreg_fcfi_t);
1552 }
1553
1554 /**
1555 * @ingroup sli
1556 * @brief Write an UNREG_RPI command to the provided buffer.
1557 *
1558 * @param sli4 SLI context pointer.
1559 * @param buf Virtual pointer to the destination buffer.
1560 * @param size Buffer size, in bytes.
1561  * @param indicator Resource indicator (RPI, VPI, VFI, or FCFI) to unregister.
1562 * @param which Type of unregister, such as node, port, domain, or FCF.
1563 * @param fc_id FC address.
1564 *
1565 * @return Returns the number of bytes written.
1566 */
1567 int32_t
1568 sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which,
1569 uint32_t fc_id)
1570 {
1571 sli4_cmd_unreg_rpi_t *unreg_rpi = buf;
1572 uint8_t index_indicator = 0;
1573
1574 if (!sli4 || !buf) {
1575 return 0;
1576 }
1577
1578 ocs_memset(buf, 0, size);
1579
1580 unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI;
1581
1582 switch (which) {
1583 case SLI_RSRC_FCOE_RPI:
1584 index_indicator = SLI4_UNREG_RPI_II_RPI;
1585 if (fc_id != UINT32_MAX) {
1586 unreg_rpi->dp = TRUE;
1587 unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff;
1588 }
1589 break;
1590 case SLI_RSRC_FCOE_VPI:
1591 index_indicator = SLI4_UNREG_RPI_II_VPI;
1592 break;
1593 case SLI_RSRC_FCOE_VFI:
1594 index_indicator = SLI4_UNREG_RPI_II_VFI;
1595 break;
1596 case SLI_RSRC_FCOE_FCFI:
1597 index_indicator = SLI4_UNREG_RPI_II_FCFI;
1598 break;
1599 default:
1600 ocs_log_test(sli4->os, "unknown type %#x\n", which);
1601 return 0;
1602 }
1603
1604 unreg_rpi->ii = index_indicator;
1605 unreg_rpi->index = indicator;
1606
1607 return sizeof(sli4_cmd_unreg_rpi_t);
1608 }
1609
1610 /**
1611 * @ingroup sli
1612 * @brief Write an UNREG_VFI command to the provided buffer.
1613 *
1614 * @param sli4 SLI context pointer.
1615 * @param buf Virtual pointer to the destination buffer.
1616 * @param size Buffer size, in bytes.
1617 * @param domain Pointer to the domain object
1618 * @param which Type of unregister, such as domain, FCFI, or everything.
1619 *
1620 * @return Returns the number of bytes written.
1621 */
1622 int32_t
1623 sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which)
1624 {
1625 sli4_cmd_unreg_vfi_t *unreg_vfi = buf;
1626
1627 if (!sli4 || !buf || !domain) {
1628 return 0;
1629 }
1630
1631 ocs_memset(buf, 0, size);
1632
1633 unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI;
1634 switch (which) {
1635 case SLI4_UNREG_TYPE_DOMAIN:
1636 unreg_vfi->index = domain->indicator;
1637 break;
1638 case SLI4_UNREG_TYPE_FCF:
1639 unreg_vfi->index = domain->fcf_indicator;
1640 break;
1641 case SLI4_UNREG_TYPE_ALL:
1642 unreg_vfi->index = UINT16_MAX;
1643 break;
1644 default:
1645 return 0;
1646 }
1647
1648 if (SLI4_UNREG_TYPE_DOMAIN != which) {
1649 unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI;
1650 }
1651
1652 return sizeof(sli4_cmd_unreg_vfi_t);
1653 }
1654
1655 /**
1656 * @ingroup sli
1657 * @brief Write an UNREG_VPI command to the provided buffer.
1658 *
1659 * @param sli4 SLI context pointer.
1660 * @param buf Virtual pointer to the destination buffer.
1661 * @param size Buffer size, in bytes.
1662  * @param indicator VPI to unregister (ignored for SLI4_UNREG_TYPE_ALL).
1663  * @param which Type of unregister: port, domain, FCFI, or everything.
1664 *
1665 * @return Returns the number of bytes written.
1666 */
1667 int32_t
1668 sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which)
1669 {
1670 sli4_cmd_unreg_vpi_t *unreg_vpi = buf;
1671
1672 if (!sli4 || !buf) {
1673 return 0;
1674 }
1675
1676 ocs_memset(buf, 0, size);
1677
1678 unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI;
1679 unreg_vpi->index = indicator;
1680 switch (which) {
1681 case SLI4_UNREG_TYPE_PORT:
1682 unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI;
1683 break;
1684 case SLI4_UNREG_TYPE_DOMAIN:
1685 unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI;
1686 break;
1687 case SLI4_UNREG_TYPE_FCF:
1688 unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
1689 break;
1690 case SLI4_UNREG_TYPE_ALL:
1691 unreg_vpi->index = UINT16_MAX; /* override indicator */
1692 unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
1693 break;
1694 default:
1695 return 0;
1696 }
1697
1698 return sizeof(sli4_cmd_unreg_vpi_t);
1699 }
1700
1701 /**
1702 * @ingroup sli
1703  * @brief Write a CONFIG_AUTO_XFER_RDY command to the provided buffer.
1704 *
1705 * @param sli4 SLI context pointer.
1706 * @param buf Virtual pointer to the destination buffer.
1707 * @param size Buffer size, in bytes.
1708  * @param max_burst_len If the write FCP_DL is less than this size,
1709 * then the SLI port will generate the auto XFER_RDY.
1710 *
1711 * @return Returns the number of bytes written.
1712 */
1713 int32_t
1714 sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len)
1715 {
1716 sli4_cmd_config_auto_xfer_rdy_t *req = buf;
1717
1718 if (!sli4 || !buf) {
1719 return 0;
1720 }
1721
1722 ocs_memset(buf, 0, size);
1723
1724 req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY;
1725 req->max_burst_len = max_burst_len;
1726
1727 return sizeof(sli4_cmd_config_auto_xfer_rdy_t);
1728 }
1729
1730 /**
1731 * @ingroup sli
1732  * @brief Write a CONFIG_AUTO_XFER_RDY_HP command to the provided buffer.
1733 *
1734 * @param sli4 SLI context pointer.
1735 * @param buf Virtual pointer to the destination buffer.
1736 * @param size Buffer size, in bytes.
1737  * @param max_burst_len If the write FCP_DL is less than this size,
1738  * then the SLI port will generate the auto XFER_RDY.
1739  * @param esoc Enable start offset computation.
1740  * @param block_size Block size.
1741 *
1742 * @return Returns the number of bytes written.
1743 */
1744 int32_t
1745 sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len,
1746 uint32_t esoc, uint32_t block_size )
1747 {
1748 sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf;
1749
1750 if (!sli4 || !buf) {
1751 return 0;
1752 }
1753
1754 ocs_memset(buf, 0, size);
1755
1756 req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP;
1757 req->max_burst_len = max_burst_len;
1758 req->esoc = esoc;
1759 req->block_size = block_size;
1760 return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t);
1761 }
1762
1763 /**
1764 * @brief Write a COMMON_FUNCTION_RESET command.
1765 *
1766 * @param sli4 SLI context.
1767 * @param buf Destination buffer for the command.
1768 * @param size Buffer size, in bytes.
1769 *
1770 * @return Returns the number of bytes written.
1771 */
1772 static int32_t
1773 sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size)
1774 {
1775 sli4_req_common_function_reset_t *reset = NULL;
1776 uint32_t sli_config_off = 0;
1777
1778 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1779 uint32_t payload_size;
1780
1781 /* Payload length must accommodate both request and response */
1782 payload_size = max(sizeof(sli4_req_common_function_reset_t),
1783 sizeof(sli4_res_common_function_reset_t));
1784
1785 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
1786 NULL);
1787 }
1788 reset = (sli4_req_common_function_reset_t *)((uint8_t *)buf + sli_config_off);
1789
1790 reset->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
1791 reset->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1792
1793 return(sli_config_off + sizeof(sli4_req_common_function_reset_t));
1794 }
1795
1796 /**
1797 * @brief Write a COMMON_CREATE_CQ command.
1798 *
1799 * @param sli4 SLI context.
1800 * @param buf Destination buffer for the command.
1801 * @param size Buffer size, in bytes.
1802 * @param qmem DMA memory for the queue.
1803 * @param eq_id Associated EQ_ID
1804  * @param ignored This parameter carries the ULP, which is only used for WQs and RQs (ignored here).
1805 *
1806 * @note This creates a Version 0 message.
1807 *
1808 * @return Returns the number of bytes written.
1809 */
1810 static int32_t
1811 sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size,
1812 ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored)
1813 {
1814 sli4_req_common_create_cq_v0_t *cqv0 = NULL;
1815 sli4_req_common_create_cq_v2_t *cqv2 = NULL;
1816 uint32_t sli_config_off = 0;
1817 uint32_t p;
1818 uintptr_t addr;
1819 uint32_t if_type = sli4->if_type;
1820 uint32_t page_bytes = 0;
1821 uint32_t num_pages = 0;
1822 uint32_t cmd_size = 0;
1823 uint32_t page_size = 0;
1824 uint32_t n_cqe = 0;
1825
1826 /* First calculate number of pages and the mailbox cmd length */
1827 switch (if_type)
1828 {
1829 case SLI4_IF_TYPE_BE3_SKH_PF:
1830 page_bytes = SLI_PAGE_SIZE;
1831 num_pages = sli_page_count(qmem->size, page_bytes);
1832 cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages);
1833 break;
1834 case SLI4_IF_TYPE_LANCER_FC_ETH:
1835 case SLI4_IF_TYPE_LANCER_G7:
1836 n_cqe = qmem->size / SLI4_CQE_BYTES;
1837 switch (n_cqe) {
1838 case 256:
1839 case 512:
1840 case 1024:
1841 case 2048:
1842 page_size = 1;
1843 break;
1844 case 4096:
1845 page_size = 2;
1846 break;
1847 default:
1848 return 0;
1849 }
1850 page_bytes = page_size * SLI_PAGE_SIZE;
1851 num_pages = sli_page_count(qmem->size, page_bytes);
1852 cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages);
1853 break;
1854 default:
1855 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type);
1856 return -1;
1857 }
1858
1859 /* now that we have the mailbox command size, we can set SLI_CONFIG fields */
1860 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1861 uint32_t payload_size;
1862
1863 /* Payload length must accommodate both request and response */
1864 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t));
1865
1866 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
1867 NULL);
1868 }
1869
1870 switch (if_type)
1871 {
1872 case SLI4_IF_TYPE_BE3_SKH_PF:
1873 cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off);
1874 cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
1875 cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1876 cqv0->hdr.version = 0;
1877 cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
1878
1879 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
1880 cqv0->num_pages = num_pages;
1881 switch (cqv0->num_pages) {
1882 case 1:
1883 cqv0->cqecnt = SLI4_CQ_CNT_256;
1884 break;
1885 case 2:
1886 cqv0->cqecnt = SLI4_CQ_CNT_512;
1887 break;
1888 case 4:
1889 cqv0->cqecnt = SLI4_CQ_CNT_1024;
1890 break;
1891 default:
1892 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages);
1893 return -1;
1894 }
1895 cqv0->evt = TRUE;
1896 cqv0->valid = TRUE;
1897 /* TODO cq->nodelay = ???; */
1898 /* TODO cq->clswm = ???; */
1899 cqv0->arm = FALSE;
1900 cqv0->eq_id = eq_id;
1901
1902 for (p = 0, addr = qmem->phys;
1903 p < cqv0->num_pages;
1904 p++, addr += page_bytes) {
1905 cqv0->page_physical_address[p].low = ocs_addr32_lo(addr);
1906 cqv0->page_physical_address[p].high = ocs_addr32_hi(addr);
1907 }
1908
1909 break;
1910 case SLI4_IF_TYPE_LANCER_FC_ETH:
1911 case SLI4_IF_TYPE_LANCER_G7:
1912 {
1913 cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off);
1914 cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
1915 cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1916 cqv2->hdr.version = 2;
1917 cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
1918
1919 if (if_type == SLI4_IF_TYPE_LANCER_G7)
1920 cqv2->autovalid = TRUE;
1921
1922 cqv2->page_size = page_size;
1923
1924 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
1925 cqv2->num_pages = num_pages;
1926 if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) {
1927 return 0;
1928 }
1929
1930 switch (cqv2->num_pages) {
1931 case 1:
1932 cqv2->cqecnt = SLI4_CQ_CNT_256;
1933 break;
1934 case 2:
1935 cqv2->cqecnt = SLI4_CQ_CNT_512;
1936 break;
1937 case 4:
1938 cqv2->cqecnt = SLI4_CQ_CNT_1024;
1939 break;
1940 case 8:
1941 cqv2->cqecnt = SLI4_CQ_CNT_LARGE;
1942 cqv2->cqe_count = n_cqe;
1943 break;
1944 default:
1945 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages);
1946 return -1;
1947 }
1948
1949 cqv2->evt = TRUE;
1950 cqv2->valid = TRUE;
1951 /* TODO cq->nodelay = ???; */
1952 /* TODO cq->clswm = ???; */
1953 cqv2->arm = FALSE;
1954 cqv2->eq_id = eq_id;
1955
1956 for (p = 0, addr = qmem->phys;
1957 p < cqv2->num_pages;
1958 p++, addr += page_bytes) {
1959 cqv2->page_physical_address[p].low = ocs_addr32_lo(addr);
1960 cqv2->page_physical_address[p].high = ocs_addr32_hi(addr);
1961 }
1962 }
1963 break;
1964 }
1965
1966 return (sli_config_off + cmd_size);
1967 }
1968
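/*
 * Example (illustrative sketch, not part of the driver): the Lancer (v2) path
 * above sizes the CQ from the DMA allocation. Assuming 16-byte CQEs
 * (SLI4_CQE_BYTES) and 4 KB pages (SLI_PAGE_SIZE), a 16 KB allocation holds
 * 1024 entries, keeps page_size at 1, and needs 4 pages (SLI4_CQ_CNT_1024).
 * The helper name example_cq_v2_num_pages is hypothetical.
 *
 *     // Hypothetical restatement of the v2 sizing rules used above.
 *     // Returns the page count, or 0 for an unsupported CQE count.
 *     static uint32_t
 *     example_cq_v2_num_pages(uint32_t qmem_bytes)
 *     {
 *         uint32_t n_cqe = qmem_bytes / SLI4_CQE_BYTES;
 *         uint32_t page_size;
 *
 *         switch (n_cqe) {
 *         case 256: case 512: case 1024: case 2048:
 *             page_size = 1;
 *             break;
 *         case 4096:
 *             page_size = 2;
 *             break;
 *         default:
 *             return 0;
 *         }
 *         return sli_page_count(qmem_bytes, page_size * SLI_PAGE_SIZE);
 *     }
 */
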
1969 /**
1970 * @brief Write a COMMON_DESTROY_CQ command.
1971 *
1972 * @param sli4 SLI context.
1973 * @param buf Destination buffer for the command.
1974 * @param size Buffer size, in bytes.
1975 * @param cq_id CQ ID
1976 *
1977 * @note This creates a Version 0 message.
1978 *
1979 * @return Returns the number of bytes written.
1980 */
1981 static int32_t
1982 sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id)
1983 {
1984 sli4_req_common_destroy_cq_t *cq = NULL;
1985 uint32_t sli_config_off = 0;
1986
1987 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
1988 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
1989 /* Payload length must accommodate both request and response */
1990 max(sizeof(sli4_req_common_destroy_cq_t),
1991 sizeof(sli4_res_hdr_t)),
1992 NULL);
1993 }
1994 cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off);
1995
1996 cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ;
1997 cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
1998 cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) -
1999 sizeof(sli4_req_hdr_t);
2000 cq->cq_id = cq_id;
2001
2002 return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t));
2003 }
2004
2005 /**
2006 * @brief Write a COMMON_MODIFY_EQ_DELAY command.
2007 *
2008 * @param sli4 SLI context.
2009 * @param buf Destination buffer for the command.
2010 * @param size Buffer size, in bytes.
2011 * @param q Queue object array.
2012 * @param num_q Queue object array count.
2013 * @param shift Phase shift for staggering interrupts.
2014 * @param delay_mult Delay multiplier for limiting interrupt frequency.
2015 *
2016 * @return Returns the number of bytes written.
2017 */
2018 static int32_t
2019 sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift,
2020 uint32_t delay_mult)
2021 {
2022 sli4_req_common_modify_eq_delay_t *modify_delay = NULL;
2023 uint32_t sli_config_off = 0;
2024 int i;
2025
2026 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2027 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2028 /* Payload length must accommodate both request and response */
2029 max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)),
2030 NULL);
2031 }
2032
2033 modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off);
2034
2035 modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY;
2036 modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2037 modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) -
2038 sizeof(sli4_req_hdr_t);
2039
2040 modify_delay->num_eq = num_q;
2041
2042 	for (i = 0; i < num_q; i++) {
2043 modify_delay->eq_delay_record[i].eq_id = q[i].id;
2044 modify_delay->eq_delay_record[i].phase = shift;
2045 modify_delay->eq_delay_record[i].delay_multiplier = delay_mult;
2046 }
2047
2048 return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t));
2049 }
2050
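/*
 * Example (illustrative sketch, not part of the driver): one command updates
 * the delay record of every EQ passed in, so interrupt coalescing for a whole
 * set of vectors can be changed in a single mailbox round trip. The wrapper
 * name example_set_eq_delay and the multiplier value are hypothetical.
 *
 *     // Hypothetical: apply a delay multiplier of 32 (no phase shift) to num_eqs EQs.
 *     static int32_t
 *     example_set_eq_delay(sli4_t *sli4, sli4_queue_t *eqs, int num_eqs)
 *     {
 *         if (sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt,
 *                 SLI4_BMBX_SIZE, eqs, num_eqs, 0, 32) == 0) {
 *             return -1;    // nothing was written to the mailbox buffer
 *         }
 *         if (sli_bmbx_command(sli4)) {
 *             return -1;    // bootstrap mailbox write failed
 *         }
 *         return sli_res_sli_config(sli4->bmbx.virt);
 *     }
 */
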
2051 /**
2052 * @brief Write a COMMON_CREATE_EQ command.
2053 *
2054 * @param sli4 SLI context.
2055 * @param buf Destination buffer for the command.
2056 * @param size Buffer size, in bytes.
2057 * @param qmem DMA memory for the queue.
2058 * @param ignored1 Ignored (used for consistency among queue creation functions).
2059 * @param ignored2 Ignored (used for consistency among queue creation functions).
2060 *
2061 * @note Other queue creation routines use the last parameter to pass in
2062 * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP,
2063  * so these parameters are ignored.
2064 *
2065 * @note This creates a Version 0 message
2066 *
2067 * @return Returns the number of bytes written.
2068 */
2069 static int32_t
2070 sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
2071 uint16_t ignored1, uint16_t ignored2)
2072 {
2073 sli4_req_common_create_eq_t *eq = NULL;
2074 uint32_t sli_config_off = 0;
2075 uint32_t p;
2076 uintptr_t addr;
2077
2078 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2079 uint32_t payload_size;
2080
2081 /* Payload length must accommodate both request and response */
2082 payload_size = max(sizeof(sli4_req_common_create_eq_t),
2083 sizeof(sli4_res_common_create_queue_t));
2084
2085 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
2086 NULL);
2087 }
2088 eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off);
2089
2090 eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ;
2091 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2092 eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) -
2093 sizeof(sli4_req_hdr_t);
2094 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) {
2095 eq->hdr.version = 2;
2096 eq->autovalid = TRUE;
2097 }
2098 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
2099 eq->num_pages = qmem->size / SLI_PAGE_SIZE;
2100 switch (eq->num_pages) {
2101 case 1:
2102 eq->eqesz = SLI4_EQE_SIZE_4;
2103 eq->count = SLI4_EQ_CNT_1024;
2104 break;
2105 case 2:
2106 eq->eqesz = SLI4_EQE_SIZE_4;
2107 eq->count = SLI4_EQ_CNT_2048;
2108 break;
2109 case 4:
2110 eq->eqesz = SLI4_EQE_SIZE_4;
2111 eq->count = SLI4_EQ_CNT_4096;
2112 break;
2113 default:
2114 ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages);
2115 return -1;
2116 }
2117 eq->valid = TRUE;
2118 eq->arm = FALSE;
2119 eq->delay_multiplier = 32;
2120
2121 for (p = 0, addr = qmem->phys;
2122 p < eq->num_pages;
2123 p++, addr += SLI_PAGE_SIZE) {
2124 eq->page_address[p].low = ocs_addr32_lo(addr);
2125 eq->page_address[p].high = ocs_addr32_hi(addr);
2126 }
2127
2128 return(sli_config_off + sizeof(sli4_req_common_create_eq_t));
2129 }
2130
2131 /**
2132 * @brief Write a COMMON_DESTROY_EQ command.
2133 *
2134 * @param sli4 SLI context.
2135 * @param buf Destination buffer for the command.
2136 * @param size Buffer size, in bytes.
2137 * @param eq_id Queue ID to destroy.
2138 *
2139  * @note Unlike the queue creation routines, the last parameter is not
2140  * ignored here; it identifies the EQ to destroy.
2142 *
2143 * @note This creates a Version 0 message.
2144 *
2145 * @return Returns the number of bytes written.
2146 */
2147 static int32_t
2148 sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id)
2149 {
2150 sli4_req_common_destroy_eq_t *eq = NULL;
2151 uint32_t sli_config_off = 0;
2152
2153 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2154 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2155 /* Payload length must accommodate both request and response */
2156 max(sizeof(sli4_req_common_destroy_eq_t),
2157 sizeof(sli4_res_hdr_t)),
2158 NULL);
2159 }
2160 eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off);
2161
2162 eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ;
2163 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2164 eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) -
2165 sizeof(sli4_req_hdr_t);
2166
2167 eq->eq_id = eq_id;
2168
2169 return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t));
2170 }
2171
2172 /**
2173 * @brief Write a LOWLEVEL_SET_WATCHDOG command.
2174 *
2175 * @param sli4 SLI context.
2176 * @param buf Destination buffer for the command.
2177 * @param size Buffer size, in bytes.
2178  * @param timeout Watchdog timer timeout, in seconds.
2179 *
2180 * @return void
2181 */
2182 void
2183 sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout)
2184 {
2185
2186 sli4_req_lowlevel_set_watchdog_t *req = NULL;
2187 uint32_t sli_config_off = 0;
2188
2189 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2190 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2191 /* Payload length must accommodate both request and response */
2192 max(sizeof(sli4_req_lowlevel_set_watchdog_t),
2193 sizeof(sli4_res_lowlevel_set_watchdog_t)),
2194 NULL);
2195 }
2196 req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off);
2197
2198 req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG;
2199 req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL;
2200 req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) - sizeof(sli4_req_hdr_t);
2201 req->watchdog_timeout = timeout;
2202
2203 return;
2204 }
2205
2206 static int32_t
2207 sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2208 {
2209 sli4_req_hdr_t *hdr = NULL;
2210 uint32_t sli_config_off = 0;
2211
2212 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2213 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2214 sizeof(sli4_req_hdr_t),
2215 dma);
2216 }
2217
2218 if (dma == NULL) {
2219 return 0;
2220 }
2221
2222 ocs_memset(dma->virt, 0, dma->size);
2223
2224 hdr = dma->virt;
2225
2226 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES;
2227 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2228 hdr->request_length = dma->size;
2229
2230 return(sli_config_off + sizeof(sli4_req_hdr_t));
2231 }
2232
2233 /**
2234 * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command.
2235 *
2236 * @param sli4 SLI context.
2237 * @param buf Destination buffer for the command.
2238 * @param size Buffer size, in bytes.
2239 * @param dma DMA structure from which the data will be copied.
2240 *
2241 * @note This creates a Version 0 message.
2242 *
2243 * @return Returns the number of bytes written.
2244 */
2245 static int32_t
2246 sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2247 {
2248 sli4_req_hdr_t *hdr = NULL;
2249 uint32_t sli_config_off = 0;
2250
2251 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2252 sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma);
2253 }
2254
2255 if (dma == NULL) {
2256 return 0;
2257 }
2258
2259 ocs_memset(dma->virt, 0, dma->size);
2260
2261 hdr = dma->virt;
2262
2263 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES;
2264 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2265 hdr->request_length = dma->size;
2266
2267 return(sli_config_off + sizeof(sli4_req_hdr_t));
2268 }
2269
2270 /**
2271 * @brief Write a COMMON_CREATE_MQ_EXT command.
2272 *
2273 * @param sli4 SLI context.
2274 * @param buf Destination buffer for the command.
2275 * @param size Buffer size, in bytes.
2276 * @param qmem DMA memory for the queue.
2277 * @param cq_id Associated CQ_ID.
2278  * @param ignored This parameter carries the ULP, which is only used for WQs and RQs (ignored here).
2279 *
2280 * @note This creates a Version 0 message.
2281 *
2282 * @return Returns the number of bytes written.
2283 */
2284 static int32_t
2285 sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size,
2286 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored)
2287 {
2288 sli4_req_common_create_mq_ext_t *mq = NULL;
2289 uint32_t sli_config_off = 0;
2290 uint32_t p;
2291 uintptr_t addr;
2292
2293 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2294 uint32_t payload_size;
2295
2296 /* Payload length must accommodate both request and response */
2297 payload_size = max(sizeof(sli4_req_common_create_mq_ext_t),
2298 sizeof(sli4_res_common_create_queue_t));
2299
2300 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
2301 NULL);
2302 }
2303 mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off);
2304
2305 mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT;
2306 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2307 mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) -
2308 sizeof(sli4_req_hdr_t);
2309 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
2310 mq->num_pages = qmem->size / SLI_PAGE_SIZE;
2311 switch (mq->num_pages) {
2312 case 1:
2313 mq->ring_size = SLI4_MQE_SIZE_16;
2314 break;
2315 case 2:
2316 mq->ring_size = SLI4_MQE_SIZE_32;
2317 break;
2318 case 4:
2319 mq->ring_size = SLI4_MQE_SIZE_64;
2320 break;
2321 case 8:
2322 mq->ring_size = SLI4_MQE_SIZE_128;
2323 break;
2324 default:
2325 ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages);
2326 return -1;
2327 }
2328
2329 /* TODO break this down by sli4->config.topology */
2330 mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE;
2331
2332 if (sli4->config.mq_create_version) {
2333 mq->cq_id_v1 = cq_id;
2334 mq->hdr.version = 1;
2335 }
2336 else {
2337 mq->cq_id_v0 = cq_id;
2338 }
2339 mq->val = TRUE;
2340
2341 for (p = 0, addr = qmem->phys;
2342 p < mq->num_pages;
2343 p++, addr += SLI_PAGE_SIZE) {
2344 mq->page_physical_address[p].low = ocs_addr32_lo(addr);
2345 mq->page_physical_address[p].high = ocs_addr32_hi(addr);
2346 }
2347
2348 return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t));
2349 }
2350
2351 /**
2352 * @brief Write a COMMON_DESTROY_MQ command.
2353 *
2354 * @param sli4 SLI context.
2355 * @param buf Destination buffer for the command.
2356 * @param size Buffer size, in bytes.
2357 * @param mq_id MQ ID
2358 *
2359 * @note This creates a Version 0 message.
2360 *
2361 * @return Returns the number of bytes written.
2362 */
2363 static int32_t
2364 sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id)
2365 {
2366 sli4_req_common_destroy_mq_t *mq = NULL;
2367 uint32_t sli_config_off = 0;
2368
2369 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2370 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2371 /* Payload length must accommodate both request and response */
2372 max(sizeof(sli4_req_common_destroy_mq_t),
2373 sizeof(sli4_res_hdr_t)),
2374 NULL);
2375 }
2376 mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off);
2377
2378 mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ;
2379 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2380 mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) -
2381 sizeof(sli4_req_hdr_t);
2382
2383 mq->mq_id = mq_id;
2384
2385 return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t));
2386 }
2387
2388 /**
2389 * @ingroup sli
2390 * @brief Write a COMMON_NOP command
2391 *
2392 * @param sli4 SLI context.
2393 * @param buf Destination buffer for the command.
2394 * @param size Buffer size, in bytes.
2395 * @param context NOP context value (passed to response, except on FC/FCoE).
2396 *
2397 * @return Returns the number of bytes written.
2398 */
2399 int32_t
2400 sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context)
2401 {
2402 sli4_req_common_nop_t *nop = NULL;
2403 uint32_t sli_config_off = 0;
2404
2405 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2406 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2407 /* Payload length must accommodate both request and response */
2408 max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)),
2409 NULL);
2410 }
2411
2412 nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off);
2413
2414 nop->hdr.opcode = SLI4_OPC_COMMON_NOP;
2415 nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2416 nop->hdr.request_length = 8;
2417
2418 ocs_memcpy(&nop->context, &context, sizeof(context));
2419
2420 return(sli_config_off + sizeof(sli4_req_common_nop_t));
2421 }
2422
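/*
 * Example (illustrative sketch, not part of the driver): a minimal bootstrap
 * mailbox round trip built on this command, following the same
 * build/issue/check pattern used by sli_common_function_reset() later in this
 * file. The wrapper name example_send_nop and the context value are
 * hypothetical.
 *
 *     // Hypothetical: issue a COMMON_NOP and return its SLI_CONFIG status.
 *     static int32_t
 *     example_send_nop(sli4_t *sli4)
 *     {
 *         if (sli_cmd_common_nop(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                 0x12345678) == 0) {
 *             return -1;    // nothing was written to the mailbox buffer
 *         }
 *         if (sli_bmbx_command(sli4)) {
 *             return -1;    // bootstrap mailbox write failed
 *         }
 *         return sli_res_sli_config(sli4->bmbx.virt);
 *     }
 */
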
2423 /**
2424 * @ingroup sli
2425 * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command.
2426 *
2427 * @param sli4 SLI context.
2428 * @param buf Destination buffer for the command.
2429 * @param size Buffer size, in bytes.
2430 * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI).
2431 *
2432 * @return Returns the number of bytes written.
2433 */
2434 int32_t
2435 sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype)
2436 {
2437 sli4_req_common_get_resource_extent_info_t *extent = NULL;
2438 uint32_t sli_config_off = 0;
2439
2440 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2441 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2442 sizeof(sli4_req_common_get_resource_extent_info_t),
2443 NULL);
2444 }
2445
2446 extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off);
2447
2448 extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO;
2449 extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2450 extent->hdr.request_length = 4;
2451
2452 extent->resource_type = rtype;
2453
2454 return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t));
2455 }
2456
2457 /**
2458 * @ingroup sli
2459 * @brief Write a COMMON_GET_SLI4_PARAMETERS command.
2460 *
2461 * @param sli4 SLI context.
2462 * @param buf Destination buffer for the command.
2463 * @param size Buffer size, in bytes.
2464 *
2465 * @return Returns the number of bytes written.
2466 */
2467 int32_t
2468 sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size)
2469 {
2470 sli4_req_hdr_t *hdr = NULL;
2471 uint32_t sli_config_off = 0;
2472
2473 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2474 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2475 sizeof(sli4_res_common_get_sli4_parameters_t),
2476 NULL);
2477 }
2478
2479 hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off);
2480
2481 hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS;
2482 hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
2483 hdr->request_length = 0x50;
2484
2485 return(sli_config_off + sizeof(sli4_req_hdr_t));
2486 }
2487
2488 /**
2489 * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer.
2490 *
2491 * @param sli4 SLI context pointer.
2492 * @param buf Virtual pointer to destination buffer.
2493 * @param size Buffer size in bytes.
2494 *
2495 * @return Returns the number of bytes written
2496 */
2497 static int32_t
2498 sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size)
2499 {
2500 sli4_req_common_query_fw_config_t *fw_config;
2501 uint32_t sli_config_off = 0;
2502 uint32_t payload_size;
2503
2504 /* Payload length must accommodate both request and response */
2505 payload_size = max(sizeof(sli4_req_common_query_fw_config_t),
2506 sizeof(sli4_res_common_query_fw_config_t));
2507
2508 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2509 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2510 payload_size,
2511 NULL);
2512 }
2513
2514 fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off);
2515 fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG;
2516 fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2517 fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2518 return sli_config_off + sizeof(sli4_req_common_query_fw_config_t);
2519 }
2520
2521 /**
2522 * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer.
2523 *
2524 * @param sli4 SLI context pointer.
2525 * @param buf Virtual pointer to destination buffer.
2526 * @param size Buffer size in bytes.
2527 *
2528 * @note Function supports both version 0 and 1 forms of this command via
2529 * the IF_TYPE.
2530 *
2531 * @return Returns the number of bytes written.
2532 */
2533 static int32_t
2534 sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size)
2535 {
2536 sli4_req_common_get_port_name_t *port_name;
2537 uint32_t sli_config_off = 0;
2538 uint32_t payload_size;
2539 uint8_t version = 0;
2540 uint8_t pt = 0;
2541
2542 /* Select command version according to IF_TYPE */
2543 switch (sli4->if_type) {
2544 case SLI4_IF_TYPE_BE3_SKH_PF:
2545 case SLI4_IF_TYPE_BE3_SKH_VF:
2546 version = 0;
2547 break;
2548 case SLI4_IF_TYPE_LANCER_FC_ETH:
2549 case SLI4_IF_TYPE_LANCER_RDMA:
2550 case SLI4_IF_TYPE_LANCER_G7:
2551 version = 1;
2552 break;
2553 default:
2554 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type);
2555 return 0;
2556 }
2557
2558 /* Payload length must accommodate both request and response */
2559 payload_size = max(sizeof(sli4_req_common_get_port_name_t),
2560 sizeof(sli4_res_common_get_port_name_t));
2561
2562 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2563 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2564 payload_size,
2565 NULL);
2566
2567 pt = 1;
2568 }
2569
2570 port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off);
2571
2572 port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME;
2573 port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2574 port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t));
2575 port_name->hdr.version = version;
2576
2577 /* Set the port type value (ethernet=0, FC=1) for V1 commands */
2578 if (version == 1) {
2579 port_name->pt = pt;
2580 }
2581
2582 return sli_config_off + port_name->hdr.request_length;
2583 }
2584
2585 /**
2586 * @ingroup sli
2587 * @brief Write a COMMON_WRITE_OBJECT command.
2588 *
2589 * @param sli4 SLI context.
2590 * @param buf Destination buffer for the command.
2591 * @param size Buffer size, in bytes.
2592 * @param noc True if the object should be written but not committed to flash.
2593 * @param eof True if this is the last write for this object.
2594 * @param desired_write_length Number of bytes of data to write to the object.
2595 * @param offset Offset, in bytes, from the start of the object.
2596 * @param object_name Name of the object to write.
2597 * @param dma DMA structure from which the data will be copied.
2598 *
2599 * @return Returns the number of bytes written.
2600 */
2601 int32_t
2602 sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size,
2603 uint16_t noc, uint16_t eof, uint32_t desired_write_length,
2604 uint32_t offset,
2605 char *object_name,
2606 ocs_dma_t *dma)
2607 {
2608 sli4_req_common_write_object_t *wr_obj = NULL;
2609 uint32_t sli_config_off = 0;
2610 sli4_bde_t *host_buffer;
2611
2612 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2613 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2614 sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t),
2615 NULL);
2616 }
2617
2618 wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off);
2619
2620 wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT;
2621 wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2622 wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
2623 wr_obj->hdr.timeout = 0;
2624 wr_obj->hdr.version = 0;
2625
2626 wr_obj->noc = noc;
2627 wr_obj->eof = eof;
2628 wr_obj->desired_write_length = desired_write_length;
2629 wr_obj->write_offset = offset;
2630 ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name));
2631 wr_obj->host_buffer_descriptor_count = 1;
2632
2633 host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor;
2634
2635 /* Setup to transfer xfer_size bytes to device */
2636 host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
2637 host_buffer->buffer_length = desired_write_length;
2638 host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
2639 host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
2640
2641 return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t));
2642 }
2643
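/*
 * Example (illustrative sketch, not part of the driver): objects such as
 * firmware images are written as a series of COMMON_WRITE_OBJECT commands,
 * with eof=0 on every chunk except the last. The loop below is hypothetical;
 * it assumes the caller refills the staging DMA buffer before each iteration
 * and it checks only the SLI_CONFIG status of each command.
 *
 *     // Hypothetical: write "total" bytes of an object in chunk-sized pieces.
 *     static int32_t
 *     example_write_object(sli4_t *sli4, char *name, ocs_dma_t *chunk,
 *                          uint32_t chunk_len, uint32_t total)
 *     {
 *         uint32_t offset;
 *
 *         for (offset = 0; offset < total; offset += chunk_len) {
 *             uint32_t len = (total - offset < chunk_len) ? (total - offset) : chunk_len;
 *             uint16_t eof = (offset + len >= total) ? 1 : 0;
 *
 *             // ... caller copies the next "len" bytes into chunk->virt here ...
 *             if (sli_cmd_common_write_object(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                     0, eof, len, offset, name, chunk) == 0) {
 *                 return -1;
 *             }
 *             if (sli_bmbx_command(sli4) || sli_res_sli_config(sli4->bmbx.virt)) {
 *                 return -1;
 *             }
 *         }
 *         return 0;
 *     }
 */
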
2644 /**
2645 * @ingroup sli
2646 * @brief Write a COMMON_DELETE_OBJECT command.
2647 *
2648 * @param sli4 SLI context.
2649 * @param buf Destination buffer for the command.
2650 * @param size Buffer size, in bytes.
2651  * @param object_name Name of the object to delete.
2652 *
2653 * @return Returns the number of bytes written.
2654 */
2655 int32_t
2656 sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size,
2657 char *object_name)
2658 {
2659 sli4_req_common_delete_object_t *del_obj = NULL;
2660 uint32_t sli_config_off = 0;
2661
2662 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2663 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2664 sizeof (sli4_req_common_delete_object_t),
2665 NULL);
2666 }
2667
2668 del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off);
2669
2670 del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT;
2671 del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2672 del_obj->hdr.request_length = sizeof(*del_obj);
2673 del_obj->hdr.timeout = 0;
2674 del_obj->hdr.version = 0;
2675
2676 ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name));
2677 return(sli_config_off + sizeof(sli4_req_common_delete_object_t));
2678 }
2679
2680 /**
2681 * @ingroup sli
2682 * @brief Write a COMMON_READ_OBJECT command.
2683 *
2684 * @param sli4 SLI context.
2685 * @param buf Destination buffer for the command.
2686 * @param size Buffer size, in bytes.
2687 * @param desired_read_length Number of bytes of data to read from the object.
2688 * @param offset Offset, in bytes, from the start of the object.
2689 * @param object_name Name of the object to read.
2690  * @param dma DMA structure into which the data will be copied.
2691 *
2692 * @return Returns the number of bytes written.
2693 */
2694 int32_t
2695 sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size,
2696 uint32_t desired_read_length,
2697 uint32_t offset,
2698 char *object_name,
2699 ocs_dma_t *dma)
2700 {
2701 sli4_req_common_read_object_t *rd_obj = NULL;
2702 uint32_t sli_config_off = 0;
2703 sli4_bde_t *host_buffer;
2704
2705 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2706 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2707 sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t),
2708 NULL);
2709 }
2710
2711 rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off);
2712
2713 rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT;
2714 rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2715 rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
2716 rd_obj->hdr.timeout = 0;
2717 rd_obj->hdr.version = 0;
2718
2719 rd_obj->desired_read_length = desired_read_length;
2720 rd_obj->read_offset = offset;
2721 ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name));
2722 rd_obj->host_buffer_descriptor_count = 1;
2723
2724 host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor;
2725
2726 	/* Set up to transfer xfer_size bytes from the device */
2727 host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
2728 host_buffer->buffer_length = desired_read_length;
2729 if (dma != NULL) {
2730 host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
2731 host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
2732 } else {
2733 host_buffer->u.data.buffer_address_low = 0;
2734 host_buffer->u.data.buffer_address_high = 0;
2735 }
2736
2737 return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t));
2738 }
2739
2740 /**
2741 * @ingroup sli
2742 * @brief Write a DMTF_EXEC_CLP_CMD command.
2743 *
2744 * @param sli4 SLI context.
2745 * @param buf Destination buffer for the command.
2746 * @param size Buffer size, in bytes.
2747 * @param cmd DMA structure that describes the buffer for the command.
2748 * @param resp DMA structure that describes the buffer for the response.
2749 *
2750 * @return Returns the number of bytes written.
2751 */
2752 int32_t
2753 sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size,
2754 ocs_dma_t *cmd,
2755 ocs_dma_t *resp)
2756 {
2757 sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL;
2758 uint32_t sli_config_off = 0;
2759
2760 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2761 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2762 sizeof (sli4_req_dmtf_exec_clp_cmd_t),
2763 NULL);
2764 }
2765
2766 clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off);
2767
2768 clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD;
2769 clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF;
2770 clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) -
2771 sizeof(sli4_req_hdr_t);
2772 clp_cmd->hdr.timeout = 0;
2773 clp_cmd->hdr.version = 0;
2774 clp_cmd->cmd_buf_length = cmd->size;
2775 clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys);
2776 clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys);
2777 clp_cmd->resp_buf_length = resp->size;
2778 clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys);
2779 clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys);
2780
2781 return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t));
2782 }
2783
2784 /**
2785 * @ingroup sli
2786 * @brief Write a COMMON_SET_DUMP_LOCATION command.
2787 *
2788 * @param sli4 SLI context.
2789 * @param buf Destination buffer for the command.
2790 * @param size Buffer size, in bytes.
2791  * @param query Zero to set the dump location, non-zero to query the dump size.
2792  * @param is_buffer_list Set to one if the buffer is a set of buffer descriptors, or
2793  * set to zero if the buffer is a contiguous dump area.
2794  * @param buffer DMA structure to which the dump will be copied.
 * @param fdb Value written to the FDB field of the command.
2795 *
2796 * @return Returns the number of bytes written.
2797 */
2798 int32_t
2799 sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size,
2800 uint8_t query, uint8_t is_buffer_list,
2801 ocs_dma_t *buffer, uint8_t fdb)
2802 {
2803 sli4_req_common_set_dump_location_t *set_dump_loc = NULL;
2804 uint32_t sli_config_off = 0;
2805
2806 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2807 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2808 sizeof (sli4_req_common_set_dump_location_t),
2809 NULL);
2810 }
2811
2812 set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off);
2813
2814 set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION;
2815 set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2816 set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t);
2817 set_dump_loc->hdr.timeout = 0;
2818 set_dump_loc->hdr.version = 0;
2819
2820 set_dump_loc->blp = is_buffer_list;
2821 set_dump_loc->qry = query;
2822 set_dump_loc->fdb = fdb;
2823
2824 if (buffer) {
2825 set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys);
2826 set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys);
2827 set_dump_loc->buffer_length = buffer->len;
2828 } else {
2829 set_dump_loc->buf_addr_low = 0;
2830 set_dump_loc->buf_addr_high = 0;
2831 set_dump_loc->buffer_length = 0;
2832 }
2833
2834 return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t));
2835 }
2836
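/*
 * Example (illustrative sketch, not part of the driver): calling this builder
 * with query=1 and no buffer asks the port how large a dump buffer it needs;
 * the size is reported in the command response (not parsed here). The wrapper
 * name example_query_dump_size is hypothetical.
 *
 *     // Hypothetical: issue a dump-size query; the buffer may be NULL for a query.
 *     static int32_t
 *     example_query_dump_size(sli4_t *sli4)
 *     {
 *         if (sli_cmd_common_set_dump_location(sli4, sli4->bmbx.virt,
 *                 SLI4_BMBX_SIZE, 1, 0, NULL, 0) == 0) {
 *             return -1;
 *         }
 *         if (sli_bmbx_command(sli4)) {
 *             return -1;
 *         }
 *         return sli_res_sli_config(sli4->bmbx.virt);
 *     }
 */
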
2837 /**
2838 * @ingroup sli
2839 * @brief Write a COMMON_SET_FEATURES command.
2840 *
2841 * @param sli4 SLI context.
2842 * @param buf Destination buffer for the command.
2843 * @param size Buffer size, in bytes.
2844 * @param feature Feature to set.
2845 * @param param_len Length of the parameter (must be a multiple of 4 bytes).
2846 * @param parameter Pointer to the parameter value.
2847 *
2848 * @return Returns the number of bytes written.
2849 */
2850 int32_t
2851 sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size,
2852 uint32_t feature,
2853 uint32_t param_len,
2854 void* parameter)
2855 {
2856 sli4_req_common_set_features_t *cmd = NULL;
2857 uint32_t sli_config_off = 0;
2858
2859 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2860 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2861 sizeof (sli4_req_common_set_features_t),
2862 NULL);
2863 }
2864
2865 cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off);
2866
2867 cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES;
2868 cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2869 cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) - sizeof(sli4_req_hdr_t);
2870 cmd->hdr.timeout = 0;
2871 cmd->hdr.version = 0;
2872
2873 cmd->feature = feature;
2874 cmd->param_len = param_len;
2875 ocs_memcpy(cmd->params, parameter, param_len);
2876
2877 return(sli_config_off + sizeof(sli4_req_common_set_features_t));
2878 }
2879
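/*
 * Example (illustrative sketch, not part of the driver): the parameter block
 * is copied inline into the command, so a caller only needs a local variable
 * of the right size, and param_len must be a multiple of 4 bytes. The feature
 * code 0x01 and the wrapper name below are placeholders, not values defined
 * by this driver.
 *
 *     // Hypothetical: set a feature whose parameter is a single 32-bit word.
 *     static int32_t
 *     example_set_one_feature(sli4_t *sli4)
 *     {
 *         uint32_t param = 1;    // feature-specific payload
 *
 *         if (sli_cmd_common_set_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                 0x01, sizeof(param), &param) == 0) {
 *             return -1;
 *         }
 *         if (sli_bmbx_command(sli4)) {
 *             return -1;
 *         }
 *         return sli_res_sli_config(sli4->bmbx.virt);
 *     }
 */
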
2880 /**
2881 * @ingroup sli
2882  * @brief Write a COMMON_GET_PROFILE_CONFIG command.
2883 *
2884 * @param sli4 SLI context.
2885 * @param buf Destination buffer for the command.
2886 * @param size Buffer size in bytes.
2887 * @param dma DMA capable memory used to retrieve profile.
2888 *
2889 * @return Returns the number of bytes written.
2890 */
2891 int32_t
2892 sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
2893 {
2894 sli4_req_common_get_profile_config_t *req = NULL;
2895 uint32_t sli_config_off = 0;
2896 uint32_t payload_size;
2897
2898 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2899 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2900 sizeof (sli4_req_common_get_profile_config_t),
2901 dma);
2902 }
2903
2904 if (dma != NULL) {
2905 req = dma->virt;
2906 ocs_memset(req, 0, dma->size);
2907 payload_size = dma->size;
2908 } else {
2909 req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off);
2910 payload_size = sizeof(sli4_req_common_get_profile_config_t);
2911 }
2912
2913 req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG;
2914 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2915 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2916 req->hdr.version = 1;
2917
2918 return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t));
2919 }
2920
2921 /**
2922 * @ingroup sli
2923  * @brief Write a COMMON_SET_PROFILE_CONFIG command.
2924 *
2925 * @param sli4 SLI context.
2926 * @param buf Destination buffer for the command.
2927 * @param size Buffer size, in bytes.
2928 * @param dma DMA capable memory containing profile.
2929 * @param profile_id Profile ID to configure.
2930 * @param descriptor_count Number of descriptors in DMA buffer.
2931 * @param isap Implicit Set Active Profile value to use.
2932 *
2933 * @return Returns the number of bytes written.
2934 */
2935 int32_t
2936 sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
2937 uint8_t profile_id, uint32_t descriptor_count, uint8_t isap)
2938 {
2939 sli4_req_common_set_profile_config_t *req = NULL;
2940 uint32_t cmd_off = 0;
2941 uint32_t payload_size;
2942
2943 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2944 cmd_off = sli_cmd_sli_config(sli4, buf, size,
2945 sizeof (sli4_req_common_set_profile_config_t),
2946 dma);
2947 }
2948
2949 if (dma != NULL) {
2950 req = dma->virt;
2951 ocs_memset(req, 0, dma->size);
2952 payload_size = dma->size;
2953 } else {
2954 req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off);
2955 payload_size = sizeof(sli4_req_common_set_profile_config_t);
2956 }
2957
2958 req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG;
2959 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2960 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2961 req->hdr.version = 1;
2962 req->profile_id = profile_id;
2963 req->desc_count = descriptor_count;
2964 req->isap = isap;
2965
2966 return(cmd_off + sizeof(sli4_req_common_set_profile_config_t));
2967 }
2968
2969 /**
2970 * @ingroup sli
2971  * @brief Write a COMMON_GET_PROFILE_LIST command.
2972 *
2973 * @param sli4 SLI context.
2974 * @param buf Destination buffer for the command.
2975 * @param size Buffer size in bytes.
2976 * @param start_profile_index First profile index to return.
2977 * @param dma Buffer into which the list will be written.
2978 *
2979 * @return Returns the number of bytes written.
2980 */
2981 int32_t
2982 sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size,
2983 uint32_t start_profile_index, ocs_dma_t *dma)
2984 {
2985 sli4_req_common_get_profile_list_t *req = NULL;
2986 uint32_t cmd_off = 0;
2987 uint32_t payload_size;
2988
2989 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2990 cmd_off = sli_cmd_sli_config(sli4, buf, size,
2991 sizeof (sli4_req_common_get_profile_list_t),
2992 dma);
2993 }
2994
2995 if (dma != NULL) {
2996 req = dma->virt;
2997 ocs_memset(req, 0, dma->size);
2998 payload_size = dma->size;
2999 } else {
3000 req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off);
3001 payload_size = sizeof(sli4_req_common_get_profile_list_t);
3002 }
3003
3004 req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST;
3005 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3006 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3007 req->hdr.version = 0;
3008
3009 req->start_profile_index = start_profile_index;
3010
3011 return(cmd_off + sizeof(sli4_req_common_get_profile_list_t));
3012 }
3013
3014 /**
3015 * @ingroup sli
3016  * @brief Write a COMMON_GET_ACTIVE_PROFILE command.
3017 *
3018 * @param sli4 SLI context.
3019 * @param buf Destination buffer for the command.
3020 * @param size Buffer size in bytes.
3021 *
3022 * @return Returns the number of bytes written.
3023 */
3024 int32_t
3025 sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size)
3026 {
3027 sli4_req_common_get_active_profile_t *req = NULL;
3028 uint32_t cmd_off = 0;
3029 uint32_t payload_size;
3030
3031 /* Payload length must accommodate both request and response */
3032 payload_size = max(sizeof(sli4_req_common_get_active_profile_t),
3033 sizeof(sli4_res_common_get_active_profile_t));
3034
3035 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3036 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3037 payload_size,
3038 NULL);
3039 }
3040
3041 req = (sli4_req_common_get_active_profile_t *)
3042 ((uint8_t*)buf + cmd_off);
3043
3044 req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE;
3045 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3046 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3047 req->hdr.version = 0;
3048
3049 return(cmd_off + sizeof(sli4_req_common_get_active_profile_t));
3050 }
3051
3052 /**
3053 * @ingroup sli
3054  * @brief Write a COMMON_SET_ACTIVE_PROFILE command.
3055 *
3056 * @param sli4 SLI context.
3057 * @param buf Destination buffer for the command.
3058 * @param size Buffer size in bytes.
3059 * @param fd If non-zero, set profile to factory default.
3060 * @param active_profile_id ID of new active profile.
3061 *
3062 * @return Returns the number of bytes written.
3063 */
3064 int32_t
3065 sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size,
3066 uint32_t fd, uint32_t active_profile_id)
3067 {
3068 sli4_req_common_set_active_profile_t *req = NULL;
3069 uint32_t cmd_off = 0;
3070 uint32_t payload_size;
3071
3072 /* Payload length must accommodate both request and response */
3073 payload_size = max(sizeof(sli4_req_common_set_active_profile_t),
3074 sizeof(sli4_res_common_set_active_profile_t));
3075
3076 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3077 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3078 payload_size,
3079 NULL);
3080 }
3081
3082 req = (sli4_req_common_set_active_profile_t *)
3083 ((uint8_t*)buf + cmd_off);
3084
3085 req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE;
3086 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3087 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3088 req->hdr.version = 0;
3089 req->fd = fd;
3090 req->active_profile_id = active_profile_id;
3091
3092 return(cmd_off + sizeof(sli4_req_common_set_active_profile_t));
3093 }
3094
3095 /**
3096 * @ingroup sli
3097 * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command.
3098 *
3099 * @param sli4 SLI context.
3100 * @param buf Destination buffer for the command.
3101 * @param size Buffer size in bytes.
3102 * @param dma Buffer to store the supported link configuration modes from the physical device.
3103 *
3104 * @return Returns the number of bytes written.
3105 */
3106 int32_t
3107 sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
3108 {
3109 sli4_req_common_get_reconfig_link_info_t *req = NULL;
3110 uint32_t cmd_off = 0;
3111 uint32_t payload_size;
3112
3113 /* Payload length must accommodate both request and response */
3114 payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t),
3115 sizeof(sli4_res_common_get_reconfig_link_info_t));
3116
3117 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3118 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3119 payload_size,
3120 dma);
3121 }
3122
3123 if (dma != NULL) {
3124 req = dma->virt;
3125 ocs_memset(req, 0, dma->size);
3126 payload_size = dma->size;
3127 } else {
3128 req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off);
3129 payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t);
3130 }
3131
3132 req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO;
3133 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3134 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3135 req->hdr.version = 0;
3136
3137 return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t));
3138 }
3139
3140 /**
3141 * @ingroup sli
3142 * @brief Write a COMMON_SET_RECONFIG_LINK_ID command.
3143 *
3144 * @param sli4 SLI context.
3145  * @param buf Destination buffer for the command.
3146  * @param size Buffer size, in bytes.
3147 * @param fd If non-zero, set link config to factory default.
3148  * @param active_link_config_id ID of the link configuration to make active.
3149 * @param dma Buffer to assign the link configuration mode that is to become active from the physical device.
3150 *
3151 * @return Returns the number of bytes written.
3152 */
3153 int32_t
3154 sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
3155 uint32_t fd, uint32_t active_link_config_id)
3156 {
3157 sli4_req_common_set_reconfig_link_id_t *req = NULL;
3158 uint32_t cmd_off = 0;
3159 uint32_t payload_size;
3160
3161 /* Payload length must accommodate both request and response */
3162 payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t),
3163 sizeof(sli4_res_common_set_reconfig_link_id_t));
3164
3165 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3166 cmd_off = sli_cmd_sli_config(sli4, buf, size,
3167 payload_size,
3168 NULL);
3169 }
3170
3171 if (dma != NULL) {
3172 req = dma->virt;
3173 ocs_memset(req, 0, dma->size);
3174 payload_size = dma->size;
3175 } else {
3176 req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off);
3177 payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t);
3178 }
3179
3180 req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID;
3181 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3182 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3183 req->hdr.version = 0;
3184 req->fd = fd;
3185 req->next_link_config_id = active_link_config_id;
3186
3187 return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t));
3188 }
3189
3190 /**
3191 * @ingroup sli
3192 * @brief Check the mailbox/queue completion entry.
3193 *
3194 * @param buf Pointer to the MCQE.
3195 *
3196 * @return Returns 0 on success, or a non-zero value on failure.
3197 */
3198 int32_t
3199 sli_cqe_mq(void *buf)
3200 {
3201 sli4_mcqe_t *mcqe = buf;
3202
3203 /*
3204 * Firmware can split mbx completions into two MCQEs: first with only
3205 * the "consumed" bit set and a second with the "complete" bit set.
3206 * Thus, ignore MCQE unless "complete" is set.
3207 */
3208 if (!mcqe->cmp) {
3209 return -2;
3210 }
3211
3212 if (mcqe->completion_status) {
3213 ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
3214 mcqe->completion_status,
3215 mcqe->extended_status,
3216 mcqe->con,
3217 mcqe->cmp,
3218 mcqe->ae,
3219 mcqe->val);
3220 }
3221
3222 return mcqe->completion_status;
3223 }
3224
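/*
 * Example (illustrative sketch, not part of the driver): a hypothetical MQ
 * completion handler built on the check above. A return of -2 means only the
 * "consumed" half of a split completion arrived and the entry can be skipped;
 * any other non-zero value is the completion status reported by the port.
 *
 *     // Hypothetical wrapper; cqe points at a single MCQE.
 *     static void
 *     example_handle_mcqe(sli4_t *sli4, uint8_t *cqe)
 *     {
 *         int32_t status = sli_cqe_mq(cqe);
 *
 *         if (status == -2) {
 *             return;    // wait for the MCQE with the "complete" bit set
 *         }
 *         if (status != 0) {
 *             ocs_log_err(sli4->os, "mailbox completion status %d\n", status);
 *         }
 *     }
 */
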
3225 /**
3226 * @ingroup sli
3227 * @brief Check the asynchronous event completion entry.
3228 *
3229 * @param sli4 SLI context.
3230 * @param buf Pointer to the ACQE.
3231 *
3232 * @return Returns 0 on success, or a non-zero value on failure.
3233 */
3234 int32_t
3235 sli_cqe_async(sli4_t *sli4, void *buf)
3236 {
3237 sli4_acqe_t *acqe = buf;
3238 int32_t rc = -1;
3239
3240 if (!sli4 || !buf) {
3241 ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf);
3242 return -1;
3243 }
3244
3245 switch (acqe->event_code) {
3246 case SLI4_ACQE_EVENT_CODE_LINK_STATE:
3247 rc = sli_fc_process_link_state(sli4, buf);
3248 break;
3249 case SLI4_ACQE_EVENT_CODE_FCOE_FIP:
3250 rc = sli_fc_process_fcoe(sli4, buf);
3251 break;
3252 case SLI4_ACQE_EVENT_CODE_GRP_5:
3253 /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n");
3254 break;
3255 case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
3256 ocs_log_debug(sli4->os,"ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
3257 acqe->event_type, acqe->event_data[0], acqe->event_data[1]);
3258 #if defined(OCS_INCLUDE_DEBUG)
3259 ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe));
3260 #endif
3261 break;
3262 case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
3263 rc = sli_fc_process_link_attention(sli4, buf);
3264 break;
3265 default:
3266 /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code);
3267 }
3268
3269 return rc;
3270 }
3271
3272 /**
3273 * @brief Check the SLI_CONFIG response.
3274 *
3275 * @par Description
3276 * Function checks the SLI_CONFIG response and the payload status.
3277 *
3278 * @param buf Pointer to SLI_CONFIG response.
3279 *
3280 * @return Returns 0 on success, or a non-zero value on failure.
3281 */
3282 static int32_t
3283 sli_res_sli_config(void *buf)
3284 {
3285 sli4_cmd_sli_config_t *sli_config = buf;
3286
3287 if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) {
3288 ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf,
3289 buf ? sli_config->hdr.command : -1);
3290 return -1;
3291 }
3292
3293 if (sli_config->hdr.status) {
3294 return sli_config->hdr.status;
3295 }
3296
3297 if (sli_config->emb) {
3298 return sli_config->payload.embed[4];
3299 } else {
3300 ocs_log_test(NULL, "external buffers not supported\n");
3301 return -1;
3302 }
3303 }
3304
3305 /**
3306 * @brief Issue a COMMON_FUNCTION_RESET command.
3307 *
3308 * @param sli4 SLI context.
3309 *
3310 * @return Returns 0 on success, or a non-zero value on failure.
3311 */
3312 static int32_t
3313 sli_common_function_reset(sli4_t *sli4)
3314 {
3315
3316 if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3317 if (sli_bmbx_command(sli4)) {
3318 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n");
3319 return -1;
3320 }
3321 if (sli_res_sli_config(sli4->bmbx.virt)) {
3322 ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n");
3323 return -1;
3324 }
3325 } else {
3326 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3327 return -1;
3328 }
3329
3330 return 0;
3331 }
3332
3333 /**
3334  * @brief Check whether the FW is ready.
3335 *
3336 * @par Description
3337  * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
3338 *
3339 * @param sli4 SLI context.
3340 * @param timeout_ms Time, in milliseconds, to wait for the port to be ready
3341 * before failing.
3342 *
3343 * @return Returns TRUE for ready, or FALSE otherwise.
3344 */
3345 static int32_t
3346 sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms)
3347 {
3348 uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000);
3349 uint32_t ready = FALSE;
3350
3351 do {
3352 iter--;
3353 ocs_udelay(SLI4_INIT_PORT_DELAY_US);
3354 if (sli_fw_ready(sli4) == 1) {
3355 ready = TRUE;
3356 }
3357 } while (!ready && (iter > 0));
3358
3359 return ready;
3360 }
3361
3362 /**
3363 * @brief Initialize the firmware.
3364 *
3365 * @par Description
3366  * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
3367 *
3368 * @param sli4 SLI context.
3369 *
3370 * @return Returns 0 on success, or a non-zero value on failure.
3371 */
3372 static int32_t
3373 sli_fw_init(sli4_t *sli4)
3374 {
3375 uint32_t ready;
3376 uint32_t endian;
3377
3378 /*
3379 * Is firmware ready for operation?
3380 */
3381 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
3382 if (!ready) {
3383 ocs_log_crit(sli4->os, "FW status is NOT ready\n");
3384 return -1;
3385 }
3386
3387 /*
3388 * Reset port to a known state
3389 */
3390 switch (sli4->if_type) {
3391 case SLI4_IF_TYPE_BE3_SKH_PF:
3392 case SLI4_IF_TYPE_BE3_SKH_VF:
3393 /* No SLIPORT_CONTROL register so use command sequence instead */
3394 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
3395 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
3396 return -1;
3397 }
3398
3399 if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3400 if (sli_bmbx_command(sli4)) {
3401 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n");
3402 return -1;
3403 }
3404 } else {
3405 ocs_log_crit(sli4->os, "bad FW_INIT write\n");
3406 return -1;
3407 }
3408
3409 if (sli_common_function_reset(sli4)) {
3410 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3411 return -1;
3412 }
3413 break;
3414 case SLI4_IF_TYPE_LANCER_FC_ETH:
3415 case SLI4_IF_TYPE_LANCER_G7:
3416 #if BYTE_ORDER == LITTLE_ENDIAN
3417 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
3418 #else
3419 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
3420 #endif
3421
3422 if (sli_sliport_control(sli4, endian))
3423 return -1;
3424 break;
3425 default:
3426 ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type);
3427 return -1;
3428 }
3429
3430 return 0;
3431 }
3432
3433 /**
3434 * @brief Terminate the firmware.
3435 *
3436 * @param sli4 SLI context.
3437 *
3438 * @return Returns 0 on success, or a non-zero value on failure.
3439 */
3440 static int32_t
3441 sli_fw_term(sli4_t *sli4)
3442 {
3443 uint32_t endian;
3444
3445 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF ||
3446 sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) {
3447 /* No SLIPORT_CONTROL register so use command sequence instead */
3448 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
3449 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
3450 return -1;
3451 }
3452
3453 if (sli_common_function_reset(sli4)) {
3454 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
3455 return -1;
3456 }
3457
3458 if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3459 if (sli_bmbx_command(sli4)) {
3460 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n");
3461 return -1;
3462 }
3463 } else {
3464 ocs_log_test(sli4->os, "bad FW_DEINIT write\n");
3465 return -1;
3466 }
3467 } else {
3468 #if BYTE_ORDER == LITTLE_ENDIAN
3469 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
3470 #else
3471 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
3472 #endif
3473 /* type 2 etc. use SLIPORT_CONTROL to initialize port */
3474 sli_sliport_control(sli4, endian);
3475 }
3476 return 0;
3477 }
3478
3479 /**
3480 * @brief Write the doorbell register associated with the queue object.
3481 *
3482 * @param sli4 SLI context.
3483 * @param q Queue object.
3484 *
3485 * @return Returns 0 on success, or a non-zero value on failure.
3486 */
3487 static int32_t
3488 sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q)
3489 {
3490 uint32_t val = 0;
3491
3492 switch (q->type) {
3493 case SLI_QTYPE_EQ:
3494 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
3495 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, FALSE);
3496 else
3497 val = sli_eq_doorbell(q->n_posted, q->id, FALSE);
3498 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3499 break;
3500 case SLI_QTYPE_CQ:
3501 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
3502 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, FALSE);
3503 else
3504 val = sli_cq_doorbell(q->n_posted, q->id, FALSE);
3505 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3506 break;
3507 case SLI_QTYPE_MQ:
3508 val = SLI4_MQ_DOORBELL(q->n_posted, q->id);
3509 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3510 break;
3511 case SLI_QTYPE_RQ:
3512 {
3513 uint32_t n_posted = q->n_posted;
3514 /*
3515 * FC/FCoE has different rules for Receive Queues. The host
3516 * should only update the doorbell of the RQ-pair containing
3517 * the headers since the header / payload RQs are treated
3518 * as a matched unit.
3519 */
3520 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
3521 /*
3522 * In RQ-pair, an RQ either contains the FC header
3523 * (i.e. is_hdr == TRUE) or the payload.
3524 *
3525 * Don't ring doorbell for payload RQ
3526 */
3527 if (!q->u.flag.is_hdr) {
3528 break;
3529 }
3530 /*
3531 * Some RQ cannot be incremented one entry at a time. Instead,
3532 * the driver collects a number of entries and updates the
3533 * RQ in batches.
3534 */
3535 if (q->u.flag.rq_batch) {
3536 if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) {
3537 break;
3538 }
3539 n_posted = SLI4_QUEUE_RQ_BATCH;
3540 }
3541 }
3542
3543 val = SLI4_RQ_DOORBELL(n_posted, q->id);
3544 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3545 break;
3546 }
3547 case SLI_QTYPE_WQ:
3548 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) {
3549 val = SLI4_WQ_DOORBELL(q->n_posted, 0, q->id);
3550 } else {
3551 /* For iftype = 2 and 3, q->index value is ignored */
3552 val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id);
3553 }
3554
3555 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3556 break;
3557 default:
3558 ocs_log_test(sli4->os, "bad queue type %d\n", q->type);
3559 return -1;
3560 }
3561
3562 return 0;
3563 }
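
/*
 * Editorial note (not part of the upstream source): when RQ batching is
 * enabled, the doorbell above is rung only once per SLI4_QUEUE_RQ_BATCH
 * header-RQ postings. Assuming, purely for illustration, a batch size of 8
 * and single-entry postings (q->n_posted == 1):
 *
 *     postings 1..7: (q->index + q->n_posted) % 8 != 0  ->  no doorbell
 *     posting  8:    (q->index + q->n_posted) % 8 == 0  ->  doorbell written
 *                                                           with n_posted = 8
 *
 * The actual batch size is whatever SLI4_QUEUE_RQ_BATCH is defined to in
 * sli4.h.
 */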
3564
3565 static int32_t
3566 sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query)
3567 {
3568
3569 if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
3570 *features, query)) {
3571 sli4_cmd_request_features_t *req_features = sli4->bmbx.virt;
3572
3573 if (sli_bmbx_command(sli4)) {
3574 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n");
3575 return -1;
3576 }
3577 if (req_features->hdr.status) {
3578 ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n",
3579 req_features->hdr.status);
3580 return -1;
3581 }
3582 features->dword = req_features->response.dword;
3583 } else {
3584 ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n");
3585 return -1;
3586 }
3587
3588 return 0;
3589 }
3590
3591 /**
3592 * @brief Calculate max queue entries.
3593 *
3594 * @param sli4 SLI context.
3595 *
3596 * @return None. The computed limits are stored in sli4->config.max_qentries[].
3597 */
3598 void
3599 sli_calc_max_qentries(sli4_t *sli4)
3600 {
3601 sli4_qtype_e q;
3602 uint32_t alloc_size, qentries, qentry_size;
3603
3604 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3605 sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q],
3606 sli4->config.count_mask[q]);
3607 }
3608
3609 /* A single, contiguous DMA allocation will be made for each queue,
3610 * of size (max_qentries * queue entry size); since these can be large,
3611 * check against the OS maximum DMA allocation size.
3612 */
3613 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3614 qentries = sli4->config.max_qentries[q];
3615 qentry_size = sli_get_queue_entry_size(sli4, q);
3616 alloc_size = qentries * qentry_size;
3617 if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3618 while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3619 /* cut the qentries in half until alloc_size <= max DMA alloc size */
3620 qentries >>= 1;
3621 alloc_size = qentries * qentry_size;
3622 }
3623 ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n",
3624 SLI_QNAME[q], sli4->config.max_qentries[q],
3625 qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE));
3626 sli4->config.max_qentries[q] = qentries;
3627 }
3628 }
3629 }
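
/*
 * Editorial note: a worked example of the halving loop above, using
 * hypothetical numbers. With a 16-byte CQE and an assumed 1 MB OS DMA
 * allocation limit:
 *
 *     max_qentries = 131072  ->  131072 * 16 = 2 MB   (exceeds the limit)
 *     qentries >>= 1         ->   65536 * 16 = 1 MB   (fits)
 *
 * so max_qentries[SLI_QTYPE_CQ] would be reduced from 131072 to 65536.
 */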
3630
3631 /**
3632 * @brief Issue a COMMON_QUERY_FW_CONFIG mailbox command and store the results.
3633 *
3634 * @param sli4 SLI context.
3635 *
3636 * @return Returns 0 on success, or a non-zero value on failure.
3637 */
3638 static int32_t
3639 sli_query_fw_config(sli4_t *sli4)
3640 {
3641 /*
3642 * Read the device configuration
3643 *
3644 * Note: Only ulp0 fields contain values
3645 */
3646 if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3647 sli4_res_common_query_fw_config_t *fw_config =
3648 (sli4_res_common_query_fw_config_t *)
3649 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
3650
3651 if (sli_bmbx_command(sli4)) {
3652 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n");
3653 return -1;
3654 }
3655 if (fw_config->hdr.status) {
3656 ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n",
3657 fw_config->hdr.status);
3658 return -1;
3659 }
3660
3661 sli4->physical_port = fw_config->physical_port;
3662 sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1);
3663 sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode &
3664 (SLI4_ULP_MODE_FCOE_INI |
3665 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
3666 sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode &
3667 (SLI4_ULP_MODE_FCOE_INI |
3668 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
3669
3670 if (sli4->config.dual_ulp_capable) {
3671 /*
3672 * Lancer will not support this, so we use the values
3673 * from the READ_CONFIG.
3674 */
3675 if (sli4->config.is_ulp_fc[0] &&
3676 sli4->config.is_ulp_fc[1]) {
3677 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total;
3678 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total;
3679 } else if (sli4->config.is_ulp_fc[0]) {
3680 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total;
3681 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total;
3682 } else {
3683 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total;
3684 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total;
3685 }
3686 }
3687 } else {
3688 ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n");
3689 return -1;
3690 }
3691 return 0;
3692 }
3693
3694 static int32_t
3695 sli_get_config(sli4_t *sli4)
3696 {
3697 ocs_dma_t get_cntl_addl_data;
3698
3699 /*
3700 * Read the device configuration
3701 */
3702 if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3703 sli4_res_read_config_t *read_config = sli4->bmbx.virt;
3704 uint32_t i;
3705 uint32_t total;
3706
3707 if (sli_bmbx_command(sli4)) {
3708 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n");
3709 return -1;
3710 }
3711 if (read_config->hdr.status) {
3712 ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n",
3713 read_config->hdr.status);
3714 return -1;
3715 }
3716
3717 sli4->config.has_extents = read_config->ext;
3718 if (FALSE == sli4->config.has_extents) {
3719 uint32_t i = 0;
3720 uint32_t *base = sli4->config.extent[0].base;
3721
3722 if (!base) {
3723 if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t),
3724 OCS_M_ZERO | OCS_M_NOWAIT))) {
3725 ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n");
3726 return -1;
3727 }
3728 }
3729
3730 for (i = 0; i < SLI_RSRC_MAX; i++) {
3731 sli4->config.extent[i].number = 1;
3732 sli4->config.extent[i].n_alloc = 0;
3733 sli4->config.extent[i].base = &base[i];
3734 }
3735
3736 sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base;
3737 sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count;
3738
3739 sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = read_config->vpi_base;
3740 sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count;
3741
3742 sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base;
3743 sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count;
3744
3745 sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base;
3746 sli4->config.extent[SLI_RSRC_FCOE_XRI].size = OCS_MIN(255,read_config->xri_count);
3747
3748 sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0;
3749 sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count;
3750 } else {
3751 /* TODO extents*/
3752 ;
3753 }
3754
3755 for (i = 0; i < SLI_RSRC_MAX; i++) {
3756 total = sli4->config.extent[i].number * sli4->config.extent[i].size;
3757 sli4->config.extent[i].use_map = ocs_bitmap_alloc(total);
3758 if (NULL == sli4->config.extent[i].use_map) {
3759 ocs_log_err(sli4->os, "bitmap memory allocation failed "
3760 "resource %d\n", i);
3761 return -1;
3762 }
3763 sli4->config.extent[i].map_size = total;
3764 }
3765
3766 sli4->config.topology = read_config->topology;
3767 sli4->config.ptv = read_config->ptv;
3768 if (sli4->config.ptv){
3769 sli4->config.tf = read_config->tf;
3770 sli4->config.pt = read_config->pt;
3771 }
3772 ocs_log_info(sli4->os, "Topology: %#x, PTV: %d, TF: %d, PT: %d\n",
3773 sli4->config.topology, sli4->config.ptv, sli4->config.tf, sli4->config.pt);
3774
3775 switch (sli4->config.topology) {
3776 case SLI4_READ_CFG_TOPO_FCOE:
3777 ocs_log_debug(sli4->os, "FCoE\n");
3778 break;
3779 case SLI4_READ_CFG_TOPO_FC:
3780 ocs_log_debug(sli4->os, "FC (unknown)\n");
3781 break;
3782 case SLI4_READ_CFG_TOPO_FC_DA:
3783 ocs_log_debug(sli4->os, "FC (direct attach)\n");
3784 break;
3785 case SLI4_READ_CFG_TOPO_FC_AL:
3786 ocs_log_debug(sli4->os, "FC (arbitrated loop)\n");
3787 break;
3788 default:
3789 ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology);
3790 }
3791
3792 sli4->config.e_d_tov = read_config->e_d_tov;
3793 sli4->config.r_a_tov = read_config->r_a_tov;
3794
3795 sli4->config.link_module_type = read_config->lmt;
3796
3797 sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count;
3798 sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count;
3799 sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count;
3800 sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count;
3801
3802 /*
3803 * READ_CONFIG doesn't give the max number of MQ. Applications
3804 * will typically want 1, but we may need another at some future
3805 * date. Dummy up a "max" MQ count here.
3806 */
3807 sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT;
3808 } else {
3809 ocs_log_err(sli4->os, "bad READ_CONFIG write\n");
3810 return -1;
3811 }
3812
3813 if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3814 sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *)
3815 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
3816
3817 if (sli_bmbx_command(sli4)) {
3818 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n");
3819 return -1;
3820 } else if (parms->hdr.status) {
3821 ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n",
3822 parms->hdr.status, parms->hdr.additional_status);
3823 return -1;
3824 }
3825
3826 sli4->config.auto_reg = parms->areg;
3827 sli4->config.auto_xfer_rdy = parms->agxf;
3828 sli4->config.hdr_template_req = parms->hdrr;
3829 sli4->config.t10_dif_inline_capable = parms->timm;
3830 sli4->config.t10_dif_separate_capable = parms->tsmm;
3831
3832 sli4->config.mq_create_version = parms->mqv;
3833 sli4->config.cq_create_version = parms->cqv;
3834 sli4->config.rq_min_buf_size = parms->min_rq_buffer_size;
3835 sli4->config.rq_max_buf_size = parms->max_rq_buffer_size;
3836
3837 sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt;
3838 sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt;
3839 sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt;
3840 sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt;
3841 sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt;
3842
3843 /* save count methods and masks for each queue type */
3844 sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask;
3845 sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method;
3846 sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask;
3847 sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method;
3848 sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask;
3849 sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method;
3850 sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask;
3851 sli4->config.count_method[SLI_QTYPE_WQ] = parms->wqe_count_method;
3852 sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask;
3853 sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method;
3854
3855 /* now calculate max queue entries */
3856 sli_calc_max_qentries(sli4);
3857
3858 sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */
3859 sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */
3860 /* ignore HLM here. Use value from REQUEST_FEATURES */
3861
3862 sli4->config.sge_supported_length = parms->sge_supported_length;
3863 if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE)
3864 sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE;
3865
3866 sli4->config.sgl_pre_registration_required = parms->sglr;
3867 /* default to using pre-registered SGL's */
3868 sli4->config.sgl_pre_registered = TRUE;
3869
3870 sli4->config.perf_hint = parms->phon;
3871 sli4->config.perf_wq_id_association = parms->phwq;
3872
3873 sli4->config.rq_batch = parms->rq_db_window;
3874
3875 /* save the fields for skyhawk SGL chaining */
3876 sli4->config.sgl_chaining_params.chaining_capable =
3877 (parms->sglc == 1);
3878 sli4->config.sgl_chaining_params.frag_num_field_offset =
3879 parms->frag_num_field_offset;
3880 sli4->config.sgl_chaining_params.frag_num_field_mask =
3881 (1ull << parms->frag_num_field_size) - 1;
3882 sli4->config.sgl_chaining_params.sgl_index_field_offset =
3883 parms->sgl_index_field_offset;
3884 sli4->config.sgl_chaining_params.sgl_index_field_mask =
3885 (1ull << parms->sgl_index_field_size) - 1;
3886 sli4->config.sgl_chaining_params.chain_sge_initial_value_lo =
3887 parms->chain_sge_initial_value_lo;
3888 sli4->config.sgl_chaining_params.chain_sge_initial_value_hi =
3889 parms->chain_sge_initial_value_hi;
3890
3891 /* Use the highest available WQE size. */
3892 if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) {
3893 sli4->config.wqe_size = SLI4_WQE_EXT_BYTES;
3894 } else {
3895 sli4->config.wqe_size = SLI4_WQE_BYTES;
3896 }
3897 }
3898
3899 if (sli_query_fw_config(sli4)) {
3900 ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n");
3901 return -1;
3902 }
3903
3904 sli4->config.port_number = 0;
3905
3906 /*
3907 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
3908 * uses VPD DMA buffer as the response won't fit in the embedded
3909 * buffer.
3910 */
3911 if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
3912 sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt;
3913
3914 if (sli_bmbx_command(sli4)) {
3915 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n");
3916 return -1;
3917 } else if (attr->hdr.status) {
3918 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n",
3919 attr->hdr.status, attr->hdr.additional_status);
3920 return -1;
3921 }
3922
3923 sli4->config.port_number = attr->port_number;
3924
3925 ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string,
3926 sizeof(sli4->config.bios_version_string));
3927 } else {
3928 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
3929 return -1;
3930 }
3931
3932 if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t),
3933 OCS_MIN_DMA_ALIGNMENT)) {
3934 ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n");
3935 } else {
3936 if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
3937 &get_cntl_addl_data)) {
3938 sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt;
3939
3940 if (sli_bmbx_command(sli4)) {
3941 ocs_log_crit(sli4->os,
3942 "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n");
3943 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3944 return -1;
3945 }
3946 if (attr->hdr.status) {
3947 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n",
3948 attr->hdr.status);
3949 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3950 return -1;
3951 }
3952
3953 ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name));
3954
3955 ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name);
3956 } else {
3957 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n");
3958 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3959 return -1;
3960 }
3961
3962 ocs_dma_free(sli4->os, &get_cntl_addl_data);
3963 }
3964
3965 if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
3966 sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) +
3967 offsetof(sli4_cmd_sli_config_t, payload.embed));
3968
3969 if (sli_bmbx_command(sli4)) {
3970 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n");
3971 return -1;
3972 }
3973
3974 sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number];
3975 }
3976 sli4->config.port_name[1] = '\0';
3977
3978 if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
3979 sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt;
3980
3981 if (sli_bmbx_command(sli4)) {
3982 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n");
3983 return -1;
3984 }
3985 if (read_rev->hdr.status) {
3986 ocs_log_err(sli4->os, "READ_REV bad status %#x\n",
3987 read_rev->hdr.status);
3988 return -1;
3989 }
3990
3991 sli4->config.fw_rev[0] = read_rev->first_fw_id;
3992 ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0]));
3993
3994 sli4->config.fw_rev[1] = read_rev->second_fw_id;
3995 ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1]));
3996
3997 sli4->config.hw_rev[0] = read_rev->first_hw_revision;
3998 sli4->config.hw_rev[1] = read_rev->second_hw_revision;
3999 sli4->config.hw_rev[2] = read_rev->third_hw_revision;
4000
4001 ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n",
4002 read_rev->first_fw_name, read_rev->first_fw_id,
4003 read_rev->second_fw_name, read_rev->second_fw_id);
4004
4005 ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision,
4006 read_rev->second_hw_revision);
4007
4008 /* Check that all VPD data was returned */
4009 if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) {
4010 ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n",
4011 read_rev->available_length,
4012 read_rev->returned_vpd_length,
4013 read_rev->actual_vpd_length);
4014 }
4015 sli4->vpd.length = read_rev->returned_vpd_length;
4016 } else {
4017 ocs_log_err(sli4->os, "bad READ_REV write\n");
4018 return -1;
4019 }
4020
4021 if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
4022 sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt;
4023
4024 if (sli_bmbx_command(sli4)) {
4025 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n");
4026 return -1;
4027 }
4028 if (read_nvparms->hdr.status) {
4029 ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n",
4030 read_nvparms->hdr.status);
4031 return -1;
4032 }
4033
4034 ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn));
4035 ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn));
4036
4037 ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4038 sli4->config.wwpn[0],
4039 sli4->config.wwpn[1],
4040 sli4->config.wwpn[2],
4041 sli4->config.wwpn[3],
4042 sli4->config.wwpn[4],
4043 sli4->config.wwpn[5],
4044 sli4->config.wwpn[6],
4045 sli4->config.wwpn[7]);
4046 ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4047 sli4->config.wwnn[0],
4048 sli4->config.wwnn[1],
4049 sli4->config.wwnn[2],
4050 sli4->config.wwnn[3],
4051 sli4->config.wwnn[4],
4052 sli4->config.wwnn[5],
4053 sli4->config.wwnn[6],
4054 sli4->config.wwnn[7]);
4055 } else {
4056 ocs_log_err(sli4->os, "bad READ_NVPARMS write\n");
4057 return -1;
4058 }
4059
4060 return 0;
4061 }
4062
4063 /****************************************************************************
4064 * Public functions
4065 */
4066
4067 /**
4068 * @ingroup sli
4069 * @brief Set up the SLI context.
4070 *
4071 * @param sli4 SLI context.
4072 * @param os Device abstraction.
4073 * @param port_type Protocol type of port (for example, FC and NIC).
4074 *
4075 * @return Returns 0 on success, or a non-zero value on failure.
4076 */
4077 int32_t
4078 sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type)
4079 {
4080 uint32_t sli_intf = UINT32_MAX;
4081 uint32_t pci_class_rev = 0;
4082 uint32_t rev_id = 0;
4083 uint32_t family = 0;
4084 uint32_t i;
4085 sli4_asic_entry_t *asic;
4086
4087 ocs_memset(sli4, 0, sizeof(sli4_t));
4088
4089 sli4->os = os;
4090 sli4->port_type = port_type;
4091
4092 /*
4093 * Read the SLI_INTF register to discover the register layout
4094 * and other capability information
4095 */
4096 sli_intf = ocs_config_read32(os, SLI4_INTF_REG);
4097
4098 if (sli_intf_valid_check(sli_intf)) {
4099 ocs_log_err(os, "SLI_INTF is not valid\n");
4100 return -1;
4101 }
4102
4103 /* the driver only supports SLI-4 */
4104 sli4->sli_rev = sli_intf_sli_revision(sli_intf);
4105 if (4 != sli4->sli_rev) {
4106 ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n",
4107 sli_intf);
4108 return -1;
4109 }
4110
4111 sli4->sli_family = sli_intf_sli_family(sli_intf);
4112
4113 sli4->if_type = sli_intf_if_type(sli_intf);
4114
4115 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) ||
4116 (SLI4_IF_TYPE_LANCER_G7 == sli4->if_type)) {
4117 ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n",
4118 sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
4119 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
4120 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
4121 }
4122
4123 /*
4124 * set the ASIC type and revision
4125 */
4126 pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION);
4127 rev_id = sli_pci_rev_id(pci_class_rev);
4128 family = sli4->sli_family;
4129 if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
4130 uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG);
4131 family = sli_asic_gen(asic_id);
4132 }
4133
4134 for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) {
4135 if ((rev_id == asic->rev_id) && (family == asic->family)) {
4136 sli4->asic_type = asic->type;
4137 sli4->asic_rev = asic->rev;
4138 break;
4139 }
4140 }
4141 /* Fail if no matching asic type/rev was found */
4142 if( (sli4->asic_type == 0) || (sli4->asic_rev == 0)) {
4143 ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n", family, rev_id);
4144 return -1;
4145 }
4146
4147 /*
4148 * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
4149 * entry, a CQ with a single 16 byte entry, and no event queue.
4150 * Alignment must be 16 bytes as the low order address bits in the
4151 * address register are also control / status.
4152 */
4153 if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE +
4154 sizeof(sli4_mcqe_t), 16)) {
4155 ocs_log_err(os, "bootstrap mailbox allocation failed\n");
4156 return -1;
4157 }
4158
4159 if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
4160 ocs_log_err(os, "bad alignment for bootstrap mailbox\n");
4161 return -1;
4162 }
4163
4164 ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
4165 ocs_addr32_hi(sli4->bmbx.phys),
4166 ocs_addr32_lo(sli4->bmbx.phys),
4167 sli4->bmbx.size);
4168
4169 /* TODO 4096 is arbitrary. What should this value actually be? */
4170 if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) {
4171 /* Note that failure isn't fatal in this specific case */
4172 sli4->vpd.data.size = 0;
4173 ocs_log_test(os, "VPD buffer allocation failed\n");
4174 }
4175
4176 if (sli_fw_init(sli4)) {
4177 ocs_log_err(sli4->os, "FW initialization failed\n");
4178 return -1;
4179 }
4180
4181 /*
4182 * Set one of fcpi (initiator), fcpt (target), or fcpc (combined) to true
4183 * in addition to any other desired features.
4184 */
4185 sli4->config.features.flag.iaab = TRUE;
4186 sli4->config.features.flag.npiv = TRUE;
4187 sli4->config.features.flag.dif = TRUE;
4188 sli4->config.features.flag.vf = TRUE;
4189 sli4->config.features.flag.fcpc = TRUE;
4190 sli4->config.features.flag.iaar = TRUE;
4191 sli4->config.features.flag.hlm = TRUE;
4192 sli4->config.features.flag.perfh = TRUE;
4193 sli4->config.features.flag.rxseq = TRUE;
4194 sli4->config.features.flag.rxri = TRUE;
4195 sli4->config.features.flag.mrqp = TRUE;
4196
4197 /* use performance hints if available */
4198 if (sli4->config.perf_hint) {
4199 sli4->config.features.flag.perfh = TRUE;
4200 }
4201
4202 if (sli_request_features(sli4, &sli4->config.features, TRUE)) {
4203 return -1;
4204 }
4205
4206 if (sli_get_config(sli4)) {
4207 return -1;
4208 }
4209
4210 return 0;
4211 }
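
/*
 * Editorial usage sketch (hypothetical caller, not part of the driver): the
 * hardware layer typically pairs sli_setup() with sli_init() during attach
 * and sli_teardown() during detach, roughly as follows:
 *
 *     sli4_t sli;
 *
 *     if (sli_setup(&sli, os, SLI4_PORT_TYPE_FC)) {
 *             return -1;            // SLI_INTF probe, ASIC match, or FW init failed
 *     }
 *     if (sli_init(&sli)) {         // commits the REQUEST_FEATURES selections
 *             sli_teardown(&sli);
 *             return -1;
 *     }
 *     // ...allocate queues, register callbacks, bring up the port...
 */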
4212
4213 bool
4214 sli_persist_topology_enabled(sli4_t *sli4)
4215 {
4216 return (sli4->config.ptv);
4217 }
4218
4219 int32_t
4220 sli_init(sli4_t *sli4)
4221 {
4222
4223 if (sli4->config.has_extents) {
4224 /* TODO COMMON_ALLOC_RESOURCE_EXTENTS */;
4225 ocs_log_test(sli4->os, "XXX need to implement extent allocation\n");
4226 return -1;
4227 }
4228
4229 sli4->config.features.flag.hlm = sli4->config.high_login_mode;
4230 sli4->config.features.flag.rxseq = FALSE;
4231 sli4->config.features.flag.rxri = FALSE;
4232
4233 if (sli_request_features(sli4, &sli4->config.features, FALSE)) {
4234 return -1;
4235 }
4236
4237 return 0;
4238 }
4239
4240 int32_t
4241 sli_reset(sli4_t *sli4)
4242 {
4243 uint32_t i;
4244
4245 if (sli_fw_init(sli4)) {
4246 ocs_log_crit(sli4->os, "FW initialization failed\n");
4247 return -1;
4248 }
4249
4250 if (sli4->config.extent[0].base) {
4251 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
4252 sli4->config.extent[0].base = NULL;
4253 }
4254
4255 for (i = 0; i < SLI_RSRC_MAX; i++) {
4256 if (sli4->config.extent[i].use_map) {
4257 ocs_bitmap_free(sli4->config.extent[i].use_map);
4258 sli4->config.extent[i].use_map = NULL;
4259 }
4260 sli4->config.extent[i].base = NULL;
4261 }
4262
4263 if (sli_get_config(sli4)) {
4264 return -1;
4265 }
4266
4267 return 0;
4268 }
4269
4270 /**
4271 * @ingroup sli
4272 * @brief Issue a Firmware Reset.
4273 *
4274 * @par Description
4275 * Issues a Firmware Reset to the chip. This reset affects the entire chip,
4276 * so all PCI function on the same PCI bus and device are affected.
4277 * @n @n This type of reset can be used to activate newly downloaded firmware.
4278 * @n @n The driver should be considered to be in an unknown state after this
4279 * reset and should be reloaded.
4280 *
4281 * @param sli4 SLI context.
4282 *
4283 * @return Returns 0 on success, or -1 otherwise.
4284 */
4285
4286 int32_t
4287 sli_fw_reset(sli4_t *sli4)
4288 {
4289 uint32_t val;
4290 uint32_t ready;
4291
4292 /*
4293 * Firmware must be ready before issuing the reset.
4294 */
4295 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
4296 if (!ready) {
4297 ocs_log_crit(sli4->os, "FW status is NOT ready\n");
4298 return -1;
4299 }
4300 switch(sli4->if_type) {
4301 case SLI4_IF_TYPE_BE3_SKH_PF:
4302 /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */
4303 val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR);
4304 val |= SLI4_PCI_SOFT_RESET_MASK;
4305 ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val);
4306 break;
4307 case SLI4_IF_TYPE_LANCER_FC_ETH:
4308 /* Lancer uses PHYDEV_CONTROL */
4309
4310 val = SLI4_PHYDEV_CONTROL_FRST;
4311 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val);
4312 break;
4313 default:
4314 ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type);
4315 return -1;
4316 break;
4317 }
4318
4319 /* wait for the FW to become ready after the reset */
4320 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
4321 if (!ready) {
4322 ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n");
4323 return -1;
4324 }
4325 return 0;
4326 }
4327
4328 /**
4329 * @ingroup sli
4330 * @brief Tear down a SLI context.
4331 *
4332 * @param sli4 SLI context.
4333 *
4334 * @return Returns 0 on success, or non-zero otherwise.
4335 */
4336 int32_t
4337 sli_teardown(sli4_t *sli4)
4338 {
4339 uint32_t i;
4340
4341 if (sli4->config.extent[0].base) {
4342 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
4343 sli4->config.extent[0].base = NULL;
4344 }
4345
4346 for (i = 0; i < SLI_RSRC_MAX; i++) {
4347 if (sli4->config.has_extents) {
4348 /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */;
4349 }
4350
4351 sli4->config.extent[i].base = NULL;
4352
4353 ocs_bitmap_free(sli4->config.extent[i].use_map);
4354 sli4->config.extent[i].use_map = NULL;
4355 }
4356
4357 if (sli_fw_term(sli4)) {
4358 ocs_log_err(sli4->os, "FW deinitialization failed\n");
4359 }
4360
4361 ocs_dma_free(sli4->os, &sli4->vpd.data);
4362 ocs_dma_free(sli4->os, &sli4->bmbx);
4363
4364 return 0;
4365 }
4366
4367 /**
4368 * @ingroup sli
4369 * @brief Register a callback for the given event.
4370 *
4371 * @param sli4 SLI context.
4372 * @param which Event of interest.
4373 * @param func Function to call when the event occurs.
4374 * @param arg Argument passed to the callback function.
4375 *
4376 * @return Returns 0 on success, or non-zero otherwise.
4377 */
4378 int32_t
4379 sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg)
4380 {
4381
4382 if (!sli4 || !func || (which >= SLI4_CB_MAX)) {
4383 ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n",
4384 sli4, which, func);
4385 return -1;
4386 }
4387
4388 switch (which) {
4389 case SLI4_CB_LINK:
4390 sli4->link = func;
4391 sli4->link_arg = arg;
4392 break;
4393 case SLI4_CB_FIP:
4394 sli4->fip = func;
4395 sli4->fip_arg = arg;
4396 break;
4397 default:
4398 ocs_log_test(sli4->os, "unknown callback %#x\n", which);
4399 return -1;
4400 }
4401
4402 return 0;
4403 }
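
/*
 * Editorial usage sketch: registering a link-event handler. The callback
 * prototype shown here is an assumption for illustration; the real prototypes
 * are declared in the SLI/HW layer headers.
 *
 *     static int32_t
 *     hw_link_event(void *ctx, void *event)
 *     {
 *             // inspect the link event and notify the transport
 *             return 0;
 *     }
 *
 *     sli_callback(&sli, SLI4_CB_LINK, hw_link_event, hw);
 *
 * An SLI4_CB_FIP handler would be registered the same way.
 */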
4404
4405 /**
4406 * @ingroup sli
4407 * @brief Initialize a queue object.
4408 *
4409 * @par Description
4410 * This initializes the sli4_queue_t object members, including the underlying
4411 * DMA memory.
4412 *
4413 * @param sli4 SLI context.
4414 * @param q Pointer to queue object.
4415 * @param qtype Type of queue to create.
4416 * @param size Size of each entry.
4417 * @param n_entries Number of entries to allocate.
4418 * @param align Starting memory address alignment.
4419 *
4420 * @note Checks if using the existing DMA memory (if any) is possible. If not,
4421 * it frees the existing memory and re-allocates.
4422 *
4423 * @return Returns 0 on success, or non-zero otherwise.
4424 */
4425 int32_t
4426 __sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype,
4427 size_t size, uint32_t n_entries, uint32_t align)
4428 {
4429
4430 if ((q->dma.virt == NULL) || (size != q->size) || (n_entries != q->length)) {
4431 if (q->dma.size) {
4432 ocs_dma_free(sli4->os, &q->dma);
4433 }
4434
4435 ocs_memset(q, 0, sizeof(sli4_queue_t));
4436
4437 if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) {
4438 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
4439 return -1;
4440 }
4441
4442 ocs_memset(q->dma.virt, 0, size * n_entries);
4443
4444 ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]",
4445 SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock);
4446
4447 q->type = qtype;
4448 q->size = size;
4449 q->length = n_entries;
4450
4451 /* Limit to half the queue size per interrupt */
4452 q->proc_limit = n_entries / 2;
4453
4454 if ( (q->type == SLI_QTYPE_EQ) || (q->type == SLI_QTYPE_CQ) ) {
4455 /* For prism, phase will be flipped after a sweep through eq and cq */
4456 q->phase = 1;
4457 }
4458
4459 switch(q->type) {
4460 case SLI_QTYPE_EQ:
4461 q->posted_limit = q->length / 2;
4462 break;
4463 default:
4464 if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) ||
4465 (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) {
4466 /* For Skyhawk, ring the doorbell more often */
4467 q->posted_limit = 8;
4468 } else {
4469 q->posted_limit = 64;
4470 }
4471 break;
4472 }
4473 }
4474
4475 return 0;
4476 }
4477
4478 /**
4479 * @ingroup sli
4480 * @brief Issue the command to create a queue.
4481 *
4482 * @param sli4 SLI context.
4483 * @param q Pointer to queue object.
4484 *
4485 * @return Returns 0 on success, or non-zero otherwise.
4486 */
4487 int32_t
4488 __sli_create_queue(sli4_t *sli4, sli4_queue_t *q)
4489 {
4490 sli4_res_common_create_queue_t *res_q = NULL;
4491
4492 if (sli_bmbx_command(sli4)){
4493 ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n",
4494 SLI_QNAME[q->type]);
4495 ocs_dma_free(sli4->os, &q->dma);
4496 return -1;
4497 }
4498 if (sli_res_sli_config(sli4->bmbx.virt)) {
4499 ocs_log_err(sli4->os, "bad status create %s\n", SLI_QNAME[q->type]);
4500 ocs_dma_free(sli4->os, &q->dma);
4501 return -1;
4502 }
4503 res_q = (void *)((uint8_t *)sli4->bmbx.virt +
4504 offsetof(sli4_cmd_sli_config_t, payload));
4505
4506 if (res_q->hdr.status) {
4507 ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n",
4508 SLI_QNAME[q->type],
4509 res_q->hdr.status, res_q->hdr.additional_status);
4510 ocs_dma_free(sli4->os, &q->dma);
4511 return -1;
4512 } else {
4513 q->id = res_q->q_id;
4514 q->doorbell_offset = res_q->db_offset;
4515 q->doorbell_rset = res_q->db_rs;
4516
4517 switch (q->type) {
4518 case SLI_QTYPE_EQ:
4519 /* No doorbell information in response for EQs */
4520 q->doorbell_offset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].off;
4521 q->doorbell_rset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].rset;
4522 break;
4523 case SLI_QTYPE_CQ:
4524 /* No doorbell information in response for CQs */
4525 q->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
4526 q->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
4527 break;
4528 case SLI_QTYPE_MQ:
4529 /* No doorbell information in response for MQs */
4530 q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off;
4531 q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset;
4532 break;
4533 case SLI_QTYPE_RQ:
4534 /* set the doorbell for non-skyhawks */
4535 if (!sli4->config.dual_ulp_capable) {
4536 q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
4537 q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
4538 }
4539 break;
4540 case SLI_QTYPE_WQ:
4541 /* set the doorbell for non-skyhawks */
4542 if (!sli4->config.dual_ulp_capable) {
4543 q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off;
4544 q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset;
4545 }
4546 break;
4547 default:
4548 break;
4549 }
4550 }
4551
4552 return 0;
4553 }
4554
4555 /**
4556 * @ingroup sli
4557 * @brief Get queue entry size.
4558 *
4559 * Get queue entry size given queue type.
4560 *
4561 * @param sli4 SLI context
4562 * @param qtype Type for which the entry size is returned.
4563 *
4564 * @return Returns > 0 on success (queue entry size), or a negative value on failure.
4565 */
4566 int32_t
4567 sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype)
4568 {
4569 uint32_t size = 0;
4570
4571 if (!sli4) {
4572 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
4573 return -1;
4574 }
4575
4576 switch (qtype) {
4577 case SLI_QTYPE_EQ:
4578 size = sizeof(uint32_t);
4579 break;
4580 case SLI_QTYPE_CQ:
4581 size = 16;
4582 break;
4583 case SLI_QTYPE_MQ:
4584 size = 256;
4585 break;
4586 case SLI_QTYPE_WQ:
4587 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4588 size = sli4->config.wqe_size;
4589 } else {
4590 /* TODO */
4591 ocs_log_test(sli4->os, "unsupported queue entry size\n");
4592 return -1;
4593 }
4594 break;
4595 case SLI_QTYPE_RQ:
4596 size = SLI4_FCOE_RQE_SIZE;
4597 break;
4598 default:
4599 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
4600 return -1;
4601 }
4602 return size;
4603 }
4604
4605 /**
4606 * @ingroup sli
4607 * @brief Modify the delay timer for all the EQs
4608 *
4609 * @param sli4 SLI context.
4610 * @param eq Array of EQs.
4611 * @param num_eq Count of EQs.
4612 * @param shift Phase shift for staggering interrupts.
4613 * @param delay_mult Delay multiplier for limiting interrupt frequency.
4614 *
4615 * @return Returns 0 on success, or -1 otherwise.
4616 */
4617 int32_t
4618 sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult)
4619 {
4620
4621 sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult);
4622
4623 if (sli_bmbx_command(sli4)) {
4624 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
4625 return -1;
4626 }
4627 if (sli_res_sli_config(sli4->bmbx.virt)) {
4628 ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n");
4629 return -1;
4630 }
4631
4632 return 0;
4633 }
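
/*
 * Editorial usage sketch: applying interrupt coalescing to a group of EQs
 * with one MODIFY_EQ_DELAY command. The shift and delay multiplier values
 * below are illustrative only.
 *
 *     sli4_queue_t eqs[4];           // previously created with sli_queue_alloc()
 *
 *     if (sli_eq_modify_delay(&sli, eqs, 4, 0, 8)) {
 *             // coalescing left at firmware defaults
 *     }
 */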
4634
4635 /**
4636 * @ingroup sli
4637 * @brief Allocate a queue.
4638 *
4639 * @par Description
4640 * Allocates DMA memory and configures the requested queue type.
4641 *
4642 * @param sli4 SLI context.
4643 * @param qtype Type of queue to create.
4644 * @param q Pointer to the queue object.
4645 * @param n_entries Number of entries to allocate.
4646 * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on).
4647 * @param ulp The ULP to bind, which is only used for WQs and RQs.
4648 *
4649 * @return Returns 0 on success, or -1 otherwise.
4650 */
4651 int32_t
4652 sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries,
4653 sli4_queue_t *assoc, uint16_t ulp)
4654 {
4655 int32_t size;
4656 uint32_t align = 0;
4657 sli4_create_q_fn_t create = NULL;
4658
4659 if (!sli4 || !q) {
4660 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
4661 return -1;
4662 }
4663
4664 /* get queue size */
4665 size = sli_get_queue_entry_size(sli4, qtype);
4666 if (size < 0)
4667 return -1;
4668 align = SLI_PAGE_SIZE;
4669
4670 switch (qtype) {
4671 case SLI_QTYPE_EQ:
4672 create = sli_cmd_common_create_eq;
4673 break;
4674 case SLI_QTYPE_CQ:
4675 create = sli_cmd_common_create_cq;
4676 break;
4677 case SLI_QTYPE_MQ:
4678 /* Validate the number of entries */
4679 switch (n_entries) {
4680 case 16:
4681 case 32:
4682 case 64:
4683 case 128:
4684 break;
4685 default:
4686 ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries);
4687 return -1;
4688 }
4689 assoc->u.flag.is_mq = TRUE;
4690 create = sli_cmd_common_create_mq_ext;
4691 break;
4692 case SLI_QTYPE_WQ:
4693 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4694 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
4695 create = sli_cmd_fcoe_wq_create;
4696 } else {
4697 create = sli_cmd_fcoe_wq_create_v1;
4698 }
4699 } else {
4700 /* TODO */
4701 ocs_log_test(sli4->os, "unsupported WQ create\n");
4702 return -1;
4703 }
4704 break;
4705 default:
4706 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
4707 return -1;
4708 }
4709
4710 if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) {
4711 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
4712 return -1;
4713 }
4714
4715 if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? assoc->id : 0, ulp)) {
4716 if (__sli_create_queue(sli4, q)) {
4717 ocs_log_err(sli4->os, "create %s failed\n", SLI_QNAME[qtype]);
4718 return -1;
4719 }
4720 q->ulp = ulp;
4721 } else {
4722 ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]);
4723 return -1;
4724 }
4725
4726 return 0;
4727 }
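
/*
 * Editorial usage sketch: building the usual EQ -> CQ -> MQ chain. The entry
 * counts and ULP number are illustrative assumptions.
 *
 *     sli4_queue_t eq, cq, mq;
 *
 *     if (sli_queue_alloc(&sli, SLI_QTYPE_EQ, &eq, 1024, NULL, 0) ||
 *         sli_queue_alloc(&sli, SLI_QTYPE_CQ, &cq, 1024, &eq,  0) ||
 *         sli_queue_alloc(&sli, SLI_QTYPE_MQ, &mq,  128, &cq,  0)) {
 *             // tear down whatever was created and fail the attach
 *     }
 *
 * Note that the MQ entry count must be one of 16, 32, 64, or 128.
 */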
4728
4729 /**
4730 * @ingroup sli
4731 * @brief Allocate a CQ set.
4732 *
4733 * @param sli4 SLI context.
4734 * @param num_cqs Number of CQs to create.
4735 * @param qs Pointers to the queue objects.
4736 * @param n_entries Number of entries to allocate per CQ.
4737 * @param eqs Associated event queues, one per CQ.
4738 *
4739 * @return Returns 0 on success, or -1 otherwise.
4740 */
4741 int32_t
4742 sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs,
4743 uint32_t n_entries, sli4_queue_t *eqs[])
4744 {
4745 uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0;
4746 uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
4747 uintptr_t addr;
4748 ocs_dma_t dma;
4749 sli4_req_common_create_cq_set_v0_t *req = NULL;
4750 sli4_res_common_create_queue_set_t *res = NULL;
4751
4752 if (!sli4) {
4753 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
4754 return -1;
4755 }
4756
4757 memset(&dma, 0, sizeof(dma));
4758
4759 /* Align the queue DMA memory */
4760 for (i = 0; i < num_cqs; i++) {
4761 if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES,
4762 n_entries, SLI_PAGE_SIZE)) {
4763 ocs_log_err(sli4->os, "Queue init failed.\n");
4764 goto error;
4765 }
4766 }
4767
4768 n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
4769 switch (n_cqe) {
4770 case 256:
4771 case 512:
4772 case 1024:
4773 case 2048:
4774 page_size = 1;
4775 break;
4776 case 4096:
4777 page_size = 2;
4778 break;
4779 default:
4780 return -1;
4781 }
4782
4783 page_bytes = page_size * SLI_PAGE_SIZE;
4784 num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
4785 cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) + (8 * num_pages_cq * num_cqs);
4786 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_set_t));
4787
4788 if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
4789 ocs_log_err(sli4->os, "DMA allocation failed\n");
4790 goto error;
4791 }
4792 ocs_memset(dma.virt, 0, payload_size);
4793
4794 if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
4795 payload_size, &dma) == -1) {
4796 goto error;
4797 }
4798
4799 /* Fill the request structure */
4800
4801 req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt);
4802 req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET;
4803 req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
4804 req->hdr.version = 0;
4805 req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
4806 req->page_size = page_size;
4807
4808 req->num_pages = num_pages_cq;
4809 switch (req->num_pages) {
4810 case 1:
4811 req->cqecnt = SLI4_CQ_CNT_256;
4812 break;
4813 case 2:
4814 req->cqecnt = SLI4_CQ_CNT_512;
4815 break;
4816 case 4:
4817 req->cqecnt = SLI4_CQ_CNT_1024;
4818 break;
4819 case 8:
4820 req->cqecnt = SLI4_CQ_CNT_LARGE;
4821 req->cqe_count = n_cqe;
4822 break;
4823 default:
4824 ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages);
4825 goto error;
4826 }
4827
4828 req->evt = TRUE;
4829 req->valid = TRUE;
4830 req->arm = FALSE;
4831 req->num_cq_req = num_cqs;
4832
4833 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
4834 req->autovalid = TRUE;
4835
4836 /* Fill page addresses of all the CQs. */
4837 for (i = 0; i < num_cqs; i++) {
4838 req->eq_id[i] = eqs[i]->id;
4839 for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += page_bytes) {
4840 req->page_physical_address[offset].low = ocs_addr32_lo(addr);
4841 req->page_physical_address[offset].high = ocs_addr32_hi(addr);
4842 offset++;
4843 }
4844 }
4845
4846 if (sli_bmbx_command(sli4)) {
4847 ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n");
4848 goto error;
4849 }
4850
4851 res = (void *)((uint8_t *)dma.virt);
4852 if (res->hdr.status) {
4853 ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n",
4854 res->hdr.status, res->hdr.additional_status);
4855 goto error;
4856 } else {
4857 /* Check if we got all requested CQs. */
4858 if (res->num_q_allocated != num_cqs) {
4859 ocs_log_crit(sli4->os, "Requested CQ count does not match allocated count.\n");
4860 goto error;
4861 }
4862
4863 /* Fill the resp cq ids. */
4864 for (i = 0; i < num_cqs; i++) {
4865 qs[i]->id = res->q_id + i;
4866 qs[i]->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off;
4867 qs[i]->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset;
4868 }
4869 }
4870
4871 ocs_dma_free(sli4->os, &dma);
4872
4873 return 0;
4874
4875 error:
4876 for (i = 0; i < num_cqs; i++) {
4877 if (qs[i]->dma.size) {
4878 ocs_dma_free(sli4->os, &qs[i]->dma);
4879 }
4880 }
4881
4882 if (dma.size) {
4883 ocs_dma_free(sli4->os, &dma);
4884 }
4885
4886 return -1;
4887 }
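
/*
 * Editorial usage sketch: creating one CQ per EQ with a single
 * COMMON_CREATE_CQ_SET command. Counts are illustrative; the queue objects
 * are assumed to have been zeroed by the caller.
 *
 *     sli4_queue_t *cqs[4], *eqs[4];  // pointers filled in by the caller
 *
 *     if (sli_cq_alloc_set(&sli, cqs, 4, 1024, eqs)) {
 *             // fall back to allocating the CQs individually with sli_queue_alloc()
 *     }
 */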
4888
4889 /**
4890 * @ingroup sli
4891 * @brief Free a queue.
4892 *
4893 * @par Description
4894 * Frees DMA memory and de-registers the requested queue.
4895 *
4896 * @param sli4 SLI context.
4897 * @param q Pointer to the queue object.
4898 * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues.
4899 * @param free_memory Non-zero if the DMA memory associated with the queue should be freed.
4900 *
4901 * @return Returns 0 on success, or -1 otherwise.
4902 */
4903 int32_t
4904 sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues, uint32_t free_memory)
4905 {
4906 sli4_destroy_q_fn_t destroy = NULL;
4907 int32_t rc = -1;
4908
4909 if (!sli4 || !q) {
4910 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
4911 return -1;
4912 }
4913
4914 if (destroy_queues) {
4915 switch (q->type) {
4916 case SLI_QTYPE_EQ:
4917 destroy = sli_cmd_common_destroy_eq;
4918 break;
4919 case SLI_QTYPE_CQ:
4920 destroy = sli_cmd_common_destroy_cq;
4921 break;
4922 case SLI_QTYPE_MQ:
4923 destroy = sli_cmd_common_destroy_mq;
4924 break;
4925 case SLI_QTYPE_WQ:
4926 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4927 destroy = sli_cmd_fcoe_wq_destroy;
4928 } else {
4929 /* TODO */
4930 ocs_log_test(sli4->os, "unsupported WQ destroy\n");
4931 return -1;
4932 }
4933 break;
4934 case SLI_QTYPE_RQ:
4935 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
4936 destroy = sli_cmd_fcoe_rq_destroy;
4937 } else {
4938 /* TODO */
4939 ocs_log_test(sli4->os, "unsupported RQ destroy\n");
4940 return -1;
4941 }
4942 break;
4943 default:
4944 ocs_log_test(sli4->os, "bad queue type %d\n",
4945 q->type);
4946 return -1;
4947 }
4948
4949 /*
4950 * Destroying queues makes BE3 sad (version 0 interface type). Rely
4951 * on COMMON_FUNCTION_RESET to free host allocated queue resources
4952 * inside the SLI Port.
4953 */
4954 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
4955 destroy = NULL;
4956 }
4957
4958 /* Destroy the queue if the operation is defined */
4959 if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) {
4960 sli4_res_hdr_t *res = NULL;
4961
4962 if (sli_bmbx_command(sli4)){
4963 ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n",
4964 SLI_QNAME[q->type]);
4965 } else if (sli_res_sli_config(sli4->bmbx.virt)) {
4966 ocs_log_err(sli4->os, "bad status destroy %s\n", SLI_QNAME[q->type]);
4967 } else {
4968 res = (void *)((uint8_t *)sli4->bmbx.virt +
4969 offsetof(sli4_cmd_sli_config_t, payload));
4970
4971 if (res->status) {
4972 ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n",
4973 SLI_QNAME[q->type],
4974 res->status, res->additional_status);
4975 } else {
4976 rc = 0;
4977 }
4978 }
4979 }
4980 }
4981
4982 if (free_memory) {
4983 ocs_lock_free(&q->lock);
4984
4985 if (ocs_dma_free(sli4->os, &q->dma)) {
4986 ocs_log_err(sli4->os, "%s queue ID %d free failed\n",
4987 SLI_QNAME[q->type], q->id);
4988 rc = -1;
4989 }
4990 }
4991
4992 return rc;
4993 }
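
/*
 * Editorial usage sketch: the destroy_queues and free_memory flags can be
 * combined to match the situation.
 *
 *     sli_queue_free(&sli, &wq, TRUE, TRUE);    // destroy in FW and free DMA
 *     sli_queue_free(&sli, &wq, FALSE, TRUE);   // free DMA only, e.g. after a FW reset
 *
 * On BE3 (interface type 0) the destroy mailbox command is skipped
 * internally, so passing destroy_queues = TRUE is safe on any interface type.
 */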
4994
4995 int32_t
4996 sli_queue_reset(sli4_t *sli4, sli4_queue_t *q)
4997 {
4998
4999 ocs_lock(&q->lock);
5000
5001 q->index = 0;
5002 q->n_posted = 0;
5003
5004 if (SLI_QTYPE_MQ == q->type) {
5005 q->u.r_idx = 0;
5006 }
5007
5008 if (q->dma.virt != NULL) {
5009 ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length));
5010 }
5011
5012 ocs_unlock(&q->lock);
5013
5014 return 0;
5015 }
5016
5017 /**
5018 * @ingroup sli
5019 * @brief Check if the given queue is empty.
5020 *
5021 * @par Description
5022 * If the valid bit of the current entry is unset, the queue is empty.
5023 *
5024 * @param sli4 SLI context.
5025 * @param q Pointer to the queue object.
5026 *
5027 * @return Returns TRUE if empty, or FALSE otherwise.
5028 */
5029 int32_t
5030 sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q)
5031 {
5032 int32_t rc = TRUE;
5033 uint8_t *qe = q->dma.virt;
5034
5035 ocs_lock(&q->lock);
5036
5037 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
5038
5039 qe += q->index * q->size;
5040
5041 rc = !sli_queue_entry_is_valid(q, qe, FALSE);
5042
5043 ocs_unlock(&q->lock);
5044
5045 return rc;
5046 }
5047
5048 /**
5049 * @ingroup sli
5050 * @brief Arm an EQ.
5051 *
5052 * @param sli4 SLI context.
5053 * @param q Pointer to queue object.
5054 * @param arm If TRUE, arm the EQ.
5055 *
5056 * @return Returns 0 on success, or non-zero otherwise.
5057 */
5058 int32_t
5059 sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
5060 {
5061 uint32_t val = 0;
5062
5063 ocs_lock(&q->lock);
5064 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5065 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
5066 else
5067 val = sli_eq_doorbell(q->n_posted, q->id, arm);
5068
5069 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5070 q->n_posted = 0;
5071 ocs_unlock(&q->lock);
5072
5073 return 0;
5074 }
5075
5076 /**
5077 * @ingroup sli
5078 * @brief Arm a queue.
5079 *
5080 * @param sli4 SLI context.
5081 * @param q Pointer to queue object.
5082 * @param arm If TRUE, arm the queue.
5083 *
5084 * @return Returns 0 on success, or non-zero otherwise.
5085 */
5086 int32_t
5087 sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
5088 {
5089 uint32_t val = 0;
5090
5091 ocs_lock(&q->lock);
5092
5093 switch (q->type) {
5094 case SLI_QTYPE_EQ:
5095 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5096 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm);
5097 else
5098 val = sli_eq_doorbell(q->n_posted, q->id, arm);
5099 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5100 q->n_posted = 0;
5101 break;
5102 case SLI_QTYPE_CQ:
5103 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7)
5104 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, arm);
5105 else
5106 val = sli_cq_doorbell(q->n_posted, q->id, arm);
5107 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
5108 q->n_posted = 0;
5109 break;
5110 default:
5111 ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n",
5112 SLI_QNAME[q->type]);
5113 }
5114
5115 ocs_unlock(&q->lock);
5116
5117 return 0;
5118 }
5119
5120 /**
5121 * @ingroup sli
5122 * @brief Write an entry to the queue object.
5123 *
5124 * Note: Assumes the q->lock will be locked and released by the caller.
5125 *
5126 * @param sli4 SLI context.
5127 * @param q Pointer to the queue object.
5128 * @param entry Pointer to the entry contents.
5129 *
5130 * @return Returns queue index on success, or negative error value otherwise.
5131 */
5132 int32_t
5133 _sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5134 {
5135 int32_t rc = 0;
5136 uint8_t *qe = q->dma.virt;
5137 uint32_t qindex;
5138
5139 qindex = q->index;
5140 qe += q->index * q->size;
5141
5142 if (entry) {
5143 if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) {
5144 sli_set_wq_id_association(entry, q->id);
5145 }
5146 #if defined(OCS_INCLUDE_DEBUG)
5147 switch (q->type) {
5148 case SLI_QTYPE_WQ: {
5149 ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe", entry, q->size);
5150 break;
5151 }
5152 case SLI_QTYPE_MQ:
5153 /* Note: we don't really need to dump the whole
5154 * 256 bytes, just do 64 */
5155 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mqe outbound", entry, 64);
5156 break;
5157
5158 default:
5159 break;
5160 }
5161 #endif
5162 ocs_memcpy(qe, entry, q->size);
5163 q->n_posted = 1;
5164 }
5165
5166 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5167
5168 rc = sli_queue_doorbell(sli4, q);
5169
5170 q->index = (q->index + q->n_posted) & (q->length - 1);
5171 q->n_posted = 0;
5172
5173 if (rc < 0) {
5174 /* failure */
5175 return rc;
5176 } else if (rc > 0) {
5177 /* failure, but we need to return a negative value on failure */
5178 return -rc;
5179 } else {
5180 return qindex;
5181 }
5182 }
5183
5184 /**
5185 * @ingroup sli
5186 * @brief Write an entry to the queue object.
5187 *
5188 * Note: Acquires and releases q->lock internally; use _sli_queue_write() if the caller already holds the lock.
5189 *
5190 * @param sli4 SLI context.
5191 * @param q Pointer to the queue object.
5192 * @param entry Pointer to the entry contents.
5193 *
5194 * @return Returns queue index on success, or negative error value otherwise.
5195 */
5196 int32_t
5197 sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5198 {
5199 int32_t rc;
5200
5201 ocs_lock(&q->lock);
5202 rc = _sli_queue_write(sli4, q, entry);
5203 ocs_unlock(&q->lock);
5204
5205 return rc;
5206 }
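
/*
 * Editorial usage sketch: posting a work queue entry. The buffer below is a
 * placeholder; a real caller would build the WQE with the driver's WQE
 * construction routines and size it to the queue's entry size (q->size).
 *
 *     uint8_t wqe[SLI4_WQE_EXT_BYTES] = { 0 };  // largest supported WQE size
 *
 *     // ...build the WQE in place...
 *     if (sli_queue_write(&sli, &wq, wqe) < 0) {
 *             // doorbell write failed or the queue type was invalid
 *     }
 */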
5207
5208 /**
5209 * @brief Check if the current queue entry is valid.
5210 *
5211 * @param q Pointer to the queue object.
5212 * @param qe Pointer to the queue entry.
5213 * @param clear Boolean to clear valid bit.
5214 *
5215 * @return Returns TRUE if the entry is valid, or FALSE otherwise.
5216 */
5217 static uint8_t
5218 sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear)
5219 {
5220 uint8_t valid = FALSE;
5221 uint8_t valid_bit_set = 0;
5222
5223 switch (q->type) {
5224 case SLI_QTYPE_EQ:
5225 valid = (((sli4_eqe_t *)qe)->vld == q->phase) ? 1 : 0;
5226 if (valid && clear) {
5227 ((sli4_eqe_t *)qe)->vld = 0;
5228 }
5229 break;
5230 case SLI_QTYPE_CQ:
5231 /*
5232 * For both MCQE and WCQE/RCQE, the valid bit
5233 * is bit 31 of dword 3 (0 based)
5234 */
5235 valid_bit_set = (qe[15] & 0x80) != 0;
5236 if (valid_bit_set == q->phase)
5237 valid = 1;
5238
5239 if (valid & clear) {
5240 qe[15] &= ~0x80;
5241 }
5242 break;
5243 case SLI_QTYPE_MQ:
5244 valid = q->index != q->u.r_idx;
5245 break;
5246 case SLI_QTYPE_RQ:
5247 valid = TRUE;
5248 clear = FALSE;
5249 break;
5250 default:
5251 ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type);
5252 }
5253
5254 if (clear) {
5255
5256 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5257 }
5258
5259 return valid;
5260 }
5261
5262 /**
5263 * @ingroup sli
5264 * @brief Read an entry from the queue object.
5265 *
5266 * @param sli4 SLI context.
5267 * @param q Pointer to the queue object.
5268 * @param entry Destination pointer for the queue entry contents.
5269 *
5270 * @return Returns 0 on success, or non-zero otherwise.
5271 */
5272 int32_t
5273 sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
5274 {
5275 int32_t rc = 0;
5276 uint8_t *qe = q->dma.virt;
5277 uint32_t *qindex = NULL;
5278
5279 uint8_t clear = (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) ? FALSE : TRUE;
5280 if (SLI_QTYPE_MQ == q->type) {
5281 qindex = &q->u.r_idx;
5282 } else {
5283 qindex = &q->index;
5284 }
5285
5286 ocs_lock(&q->lock);
5287
5288 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
5289
5290 qe += *qindex * q->size;
5291
5292 if (!sli_queue_entry_is_valid(q, qe, clear)) {
5293 ocs_unlock(&q->lock);
5294 return -1;
5295 }
5296
5297 if (entry) {
5298 ocs_memcpy(entry, qe, q->size);
5299 #if defined(OCS_INCLUDE_DEBUG)
5300 switch(q->type) {
5301 case SLI_QTYPE_CQ:
5302 ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size);
5303 break;
5304 case SLI_QTYPE_MQ:
5305 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64);
5306 break;
5307 case SLI_QTYPE_EQ:
5308 ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size);
5309 break;
5310 default:
5311 break;
5312 }
5313 #endif
5314 }
5315
5316 switch (q->type) {
5317 case SLI_QTYPE_EQ:
5318 case SLI_QTYPE_CQ:
5319 case SLI_QTYPE_MQ:
5320 *qindex = (*qindex + 1) & (q->length - 1);
5321 if (SLI_QTYPE_MQ != q->type) {
5322 q->n_posted++;
5323 /*
5324 * For prism, the phase value will be used to check the validity of eq/cq entries.
5325 * The value toggles after a complete sweep through the queue.
5326 */
5327 if ((SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) && (*qindex == 0)) {
5328 q->phase ^= (uint16_t) 0x1;
5329 }
5330 }
5331 break;
5332 default:
5333 /* reads don't update the index */
5334 break;
5335 }
5336
5337 ocs_unlock(&q->lock);
5338
5339 return rc;
5340 }
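
/*
 * Editorial usage sketch: a typical EQ service loop, draining valid entries
 * and then re-arming the EQ (which also acknowledges the consumed EQEs).
 *
 *     uint8_t  eqe[sizeof(sli4_eqe_t)];
 *     uint16_t cq_id;
 *
 *     while (sli_queue_read(&sli, &eq, eqe) == 0) {
 *             if (sli_eq_parse(&sli, eqe, &cq_id) == 0) {
 *                     // process completions on the CQ identified by cq_id
 *             }
 *     }
 *     sli_queue_eq_arm(&sli, &eq, TRUE);
 */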
5341
5342 int32_t
5343 sli_queue_index(sli4_t *sli4, sli4_queue_t *q)
5344 {
5345
5346 if (q) {
5347 return q->index;
5348 } else {
5349 return -1;
5350 }
5351 }
5352
5353 int32_t
5354 sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
5355 {
5356 int32_t rc;
5357
5358 ocs_lock(&q->lock);
5359 rc = _sli_queue_poke(sli4, q, index, entry);
5360 ocs_unlock(&q->lock);
5361
5362 return rc;
5363 }
5364
5365 int32_t
5366 _sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
5367 {
5368 int32_t rc = 0;
5369 uint8_t *qe = q->dma.virt;
5370
5371 if (index >= q->length) {
5372 return -1;
5373 }
5374
5375 qe += index * q->size;
5376
5377 if (entry) {
5378 ocs_memcpy(qe, entry, q->size);
5379 }
5380
5381 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
5382
5383 return rc;
5384 }
5385
5386 /**
5387 * @ingroup sli
5388 * @brief Allocate SLI Port resources.
5389 *
5390 * @par Description
5391 * Allocate port-related resources, such as VFI, RPI, XRI, and so on.
5392 * Resources are modeled using extents, regardless of whether the underlying
5393 * device implements resource extents. If the device does not implement
5394 * extents, the SLI layer models this as a single (albeit large) extent.
5395 *
5396 * @param sli4 SLI context.
5397 * @param rtype Resource type (for example, RPI or XRI)
5398 * @param rid Allocated resource ID.
5399 * @param index Index into the bitmap.
5400 *
5401 * @return Returns 0 on success, or a non-zero value on failure.
5402 */
5403 int32_t
5404 sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid, uint32_t *index)
5405 {
5406 int32_t rc = 0;
5407 uint32_t size;
5408 uint32_t extent_idx;
5409 uint32_t item_idx;
5410 int status;
5411
5412 *rid = UINT32_MAX;
5413 *index = UINT32_MAX;
5414
5415 switch (rtype) {
5416 case SLI_RSRC_FCOE_VFI:
5417 case SLI_RSRC_FCOE_VPI:
5418 case SLI_RSRC_FCOE_RPI:
5419 case SLI_RSRC_FCOE_XRI:
5420 status = ocs_bitmap_find(sli4->config.extent[rtype].use_map,
5421 sli4->config.extent[rtype].map_size);
5422 if (status < 0) {
5423 ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n",
5424 rtype, sli4->config.extent[rtype].n_alloc);
5425 rc = -1;
5426 break;
5427 } else {
5428 *index = status;
5429 }
5430
5431 size = sli4->config.extent[rtype].size;
5432
5433 extent_idx = *index / size;
5434 item_idx = *index % size;
5435
5436 *rid = sli4->config.extent[rtype].base[extent_idx] + item_idx;
5437
5438 sli4->config.extent[rtype].n_alloc++;
5439 break;
5440 default:
5441 rc = -1;
5442 }
5443
5444 return rc;
5445 }
5446
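/**
 * @par Example
 * A minimal usage sketch (illustrative only, not part of the driver),
 * assuming the caller owns error handling for the allocated XRI:
 * @code
 * uint32_t xri_id = UINT32_MAX;
 * uint32_t xri_index = UINT32_MAX;
 *
 * if (sli_resource_alloc(sli4, SLI_RSRC_FCOE_XRI, &xri_id, &xri_index) == 0) {
 *         // ... use xri_id in subsequent WQEs ...
 *         sli_resource_free(sli4, SLI_RSRC_FCOE_XRI, xri_id);
 * }
 * @endcode
 */
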
5447 /**
5448 * @ingroup sli
5449 * @brief Free the SLI Port resources.
5450 *
5451 * @par Description
5452 * Free port-related resources, such as VFI, RPI, XRI, and so on. See the discussion of
5453 * "extent" usage in sli_resource_alloc.
5454 *
5455 * @param sli4 SLI context.
5456 * @param rtype Resource type (for example, RPI or XRI).
5457 * @param rid Allocated resource ID.
5458 *
5459 * @return Returns 0 on success, or a non-zero value on failure.
5460 */
5461 int32_t
5462 sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid)
5463 {
5464 int32_t rc = -1;
5465 uint32_t x;
5466 uint32_t size, *base;
5467
5468 switch (rtype) {
5469 case SLI_RSRC_FCOE_VFI:
5470 case SLI_RSRC_FCOE_VPI:
5471 case SLI_RSRC_FCOE_RPI:
5472 case SLI_RSRC_FCOE_XRI:
5473 /*
5474 * Figure out which extent contains the resource ID. I.e. find
5475 * the extent such that
5476 * extent->base <= resource ID < extent->base + extent->size
5477 */
5478 base = sli4->config.extent[rtype].base;
5479 size = sli4->config.extent[rtype].size;
5480
5481 /*
5482 * In the case of FW reset, this may be cleared but the force_free path will
5483 * still attempt to free the resource. Prevent a NULL pointer access.
5484 */
5485 if (base != NULL) {
5486 for (x = 0; x < sli4->config.extent[rtype].number; x++) {
5487 if ((rid >= base[x]) && (rid < (base[x] + size))) {
5488 rid -= base[x];
5489 ocs_bitmap_clear(sli4->config.extent[rtype].use_map,
5490 (x * size) + rid);
5491 rc = 0;
5492 break;
5493 }
5494 }
5495 }
5496 break;
5497 default:
5498 ;
5499 }
5500
5501 return rc;
5502 }
5503
5504 int32_t
5505 sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype)
5506 {
5507 int32_t rc = -1;
5508 uint32_t i;
5509
5510 switch (rtype) {
5511 case SLI_RSRC_FCOE_VFI:
5512 case SLI_RSRC_FCOE_VPI:
5513 case SLI_RSRC_FCOE_RPI:
5514 case SLI_RSRC_FCOE_XRI:
5515 for (i = 0; i < sli4->config.extent[rtype].map_size; i++) {
5516 ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i);
5517 }
5518 rc = 0;
5519 break;
5520 default:
5521 ;
5522 }
5523
5524 return rc;
5525 }
5526
5527 /**
5528 * @ingroup sli
5529 * @brief Parse an EQ entry to retrieve the CQ_ID for this event.
5530 *
5531 * @param sli4 SLI context.
5532 * @param buf Pointer to the EQ entry.
5533 * @param cq_id CQ_ID for this entry (only valid on success).
5534 *
5535 * @return
5536 * - 0 if success.
5537 * - < 0 if error.
5538 * - > 0 if firmware detects EQ overflow.
5539 */
5540 int32_t
5541 sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id)
5542 {
5543 sli4_eqe_t *eqe = (void *)buf;
5544 int32_t rc = 0;
5545
5546 if (!sli4 || !buf || !cq_id) {
5547 ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n",
5548 sli4, buf, cq_id);
5549 return -1;
5550 }
5551
5552 switch (eqe->major_code) {
5553 case SLI4_MAJOR_CODE_STANDARD:
5554 *cq_id = eqe->resource_id;
5555 break;
5556 case SLI4_MAJOR_CODE_SENTINEL:
5557 ocs_log_debug(sli4->os, "sentinel EQE\n");
5558 rc = 1;
5559 break;
5560 default:
5561 ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n",
5562 eqe->major_code, eqe->minor_code);
5563 rc = -1;
5564 }
5565
5566 return rc;
5567 }
5568
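/**
 * @par Example
 * A hedged sketch of an EQ service loop (illustrative only); the use of
 * sli_queue_read() and sli_queue_arm() here follows the pattern of this
 * driver's hardware layer and is an assumption about the caller:
 * @code
 * uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
 * uint16_t cq_id;
 *
 * while (sli_queue_read(sli4, eq, eqe) == 0) {
 *         if (sli_eq_parse(sli4, eqe, &cq_id) == 0) {
 *                 // cq_id identifies the CQ to service next
 *         }
 * }
 * sli_queue_arm(sli4, eq, TRUE);
 * @endcode
 */
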
5569 /**
5570 * @ingroup sli
5571 * @brief Parse a CQ entry to retrieve the event type and the associated queue.
5572 *
5573 * @param sli4 SLI context.
5574 * @param cq CQ to process.
5575 * @param cqe Pointer to the CQ entry.
5576 * @param etype CQ event type.
5577 * @param q_id Queue ID associated with this completion message
5578 * (that is, MQ_ID, RQ_ID, and so on).
5579 *
5580 * @return
5581 * - 0 if call completed correctly and CQE status is SUCCESS.
5582 * - -1 if call failed (no CQE status).
5583 * - Other value if call completed correctly and return value is a CQE status value.
5584 */
5585 int32_t
5586 sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
5587 uint16_t *q_id)
5588 {
5589 int32_t rc = 0;
5590
5591 if (!sli4 || !cq || !cqe || !etype) {
5592 ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
5593 sli4, cq, cqe, etype, q_id);
5594 return -1;
5595 }
5596
5597 if (cq->u.flag.is_mq) {
5598 sli4_mcqe_t *mcqe = (void *)cqe;
5599
5600 if (mcqe->ae) {
5601 *etype = SLI_QENTRY_ASYNC;
5602 } else {
5603 *etype = SLI_QENTRY_MQ;
5604 rc = sli_cqe_mq(mcqe);
5605 }
5606 *q_id = -1;
5607 } else if (SLI4_PORT_TYPE_FC == sli4->port_type) {
5608 rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
5609 } else {
5610 ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n",
5611 sli4->port_type);
5612 rc = -1;
5613 }
5614
5615 return rc;
5616 }
5617
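/**
 * @par Example
 * An illustrative dispatch sketch (not part of the driver); sizing the
 * entry buffer with sizeof(sli4_mcqe_t) is an assumption borrowed from
 * typical callers:
 * @code
 * uint8_t cqe[sizeof(sli4_mcqe_t)] = { 0 };
 * sli4_qentry_e etype;
 * uint16_t q_id;
 *
 * while (sli_queue_read(sli4, cq, cqe) == 0) {
 *         int32_t status = sli_cq_parse(sli4, cq, cqe, &etype, &q_id);
 *
 *         if (status < 0) {
 *                 break;
 *         }
 *         // etype selects the handler: SLI_QENTRY_MQ, SLI_QENTRY_ASYNC, WQ/RQ completions, ...
 * }
 * @endcode
 */
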
5618 /**
5619 * @ingroup sli
5620 * @brief Cause chip to enter an unrecoverable error state.
5621 *
5622 * @par Description
5623 * Cause the chip to enter an unrecoverable error state. This is
5624 * used when unexpected FW behavior is detected, so that the FW can be
5625 * halted from the driver as soon as the error is detected.
5626 *
5627 * @param sli4 SLI context.
5628 * @param dump Generate dump as part of reset.
5629 *
5630 * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip).
5631 */
5632 int32_t sli_raise_ue(sli4_t *sli4, uint8_t dump)
5633 {
5634 #define FDD 2
5635 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5636 switch(sli_get_asic_type(sli4)) {
5637 case SLI4_ASIC_TYPE_BE3: {
5638 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff);
5639 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0);
5640 break;
5641 }
5642 case SLI4_ASIC_TYPE_SKYHAWK: {
5643 uint32_t value;
5644 value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG);
5645 ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24)));
5646 break;
5647 }
5648 default:
5649 ocs_log_test(sli4->os, "invalid asic type %d\n", sli_get_asic_type(sli4));
5650 return -1;
5651 }
5652 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
5653 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
5654 if (FDD == dump) {
5655 sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP);
5656 } else {
5657 uint32_t value = SLI4_PHYDEV_CONTROL_FRST;
5658 if (dump == 1) {
5659 value |= SLI4_PHYDEV_CONTROL_DD;
5660 }
5661 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value);
5662 }
5663 } else {
5664 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
5665 return -1;
5666 }
5667 return 0;
5668 }
5669
5670 /**
5671 * @ingroup sli
5672 * @brief Read the SLIPORT_STATUS register to check if a dump is ready.
5673 *
5674 * @param sli4 SLI context.
5675 *
5676 * @return Returns 0 if no dump is ready, 1 if a dump is ready, or 2 if a functional dump (FDP) is present.
5677 */
5678 int32_t sli_dump_is_ready(sli4_t *sli4)
5679 {
5680 int32_t rc = 0;
5681 uint32_t port_val;
5682 uint32_t bmbx_val;
5683 uint32_t uerr_lo;
5684 uint32_t uerr_hi;
5685 uint32_t uerr_mask_lo;
5686 uint32_t uerr_mask_hi;
5687
5688 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5689 /* for iftype=0, dump ready when UE is encountered */
5690 uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
5691 uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
5692 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
5693 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
5694 if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) {
5695 rc = 1;
5696 }
5697
5698 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) ||
5699 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) {
5700 /*
5701 * Ensure that the port is ready AND the mailbox is
5702 * ready before signaling that the dump is ready to go.
5703 */
5704 port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5705 bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX);
5706
5707 if ((bmbx_val & SLI4_BMBX_RDY) &&
5708 SLI4_PORT_STATUS_READY(port_val)) {
5709 if (SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) {
5710 rc = 1;
5711 } else if (SLI4_PORT_STATUS_FDP_PRESENT(port_val)) {
5712 rc = 2;
5713 }
5714 }
5715 } else {
5716 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
5717 return -1;
5718 }
5719 return rc;
5720 }
5721
5722 /**
5723 * @ingroup sli
5724 * @brief Read the SLIPORT_STATUS register to check if a dump is present.
5725 *
5726 * @param sli4 SLI context.
5727 *
5728 * @return
5729 * - 0 if call completed correctly and no dump is present.
5730 * - 1 if call completed and dump is present.
5731 * - -1 if call failed (unsupported chip).
5732 */
5733 int32_t sli_dump_is_present(sli4_t *sli4)
5734 {
5735 uint32_t val;
5736 uint32_t ready;
5737
5738 if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) &&
5739 (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(sli4))) {
5740 ocs_log_test(sli4->os, "Function only supported for I/F type 2\n");
5741 return -1;
5742 }
5743
5744 /* If the chip is not ready, then there cannot be a dump */
5745 ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US);
5746 if (!ready) {
5747 return 0;
5748 }
5749
5750 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5751 if (UINT32_MAX == val) {
5752 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
5753 return -1;
5754 } else {
5755 return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0);
5756 }
5757 }
5758
5759 /**
5760 * @ingroup sli
5761 * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
5762 *
5763 * @param sli4 SLI context.
5764 *
5765 * @return
5766 * - 0 if call completed correctly and reset is not required.
5767 * - 1 if call completed and reset is required.
5768 * - -1 if call failed.
5769 */
5770 int32_t sli_reset_required(sli4_t *sli4)
5771 {
5772 uint32_t val;
5773
5774 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
5775 ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
5776 return 0;
5777 }
5778
5779 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5780 if (UINT32_MAX == val) {
5781 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
5782 return -1;
5783 } else {
5784 return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0);
5785 }
5786 }
5787
5788 /**
5789 * @ingroup sli
5790 * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if
5791 * the port status indicates that a FW error has occurred.
5792 *
5793 * @param sli4 SLI context.
5794 *
5795 * @return
5796 * - 0 if call completed correctly and no FW error occurred.
5797 * - > 0 which indicates that a FW error has occurred.
5798 * - -1 if call failed.
5799 */
5800 int32_t sli_fw_error_status(sli4_t *sli4)
5801 {
5802 uint32_t sliport_semaphore;
5803 int32_t rc = 0;
5804
5805 sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
5806 if (UINT32_MAX == sliport_semaphore) {
5807 ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n");
5808 return 1;
5809 }
5810 rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0);
5811
5812 if (rc == 0) {
5813 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
5814 (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) {
5815 uint32_t uerr_mask_lo, uerr_mask_hi;
5816 uint32_t uerr_status_lo, uerr_status_hi;
5817
5818 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
5819 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
5820 uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
5821 uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
5822 if ((uerr_mask_lo & uerr_status_lo) != 0 ||
5823 (uerr_mask_hi & uerr_status_hi) != 0) {
5824 rc = 1;
5825 }
5826 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
5827 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
5828 uint32_t sliport_status;
5829
5830 sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5831 rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0);
5832 }
5833 }
5834 return rc;
5835 }
5836
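/**
 * @par Example
 * A hedged sketch of how a watchdog path might react to a detected FW
 * error (illustrative only; the dump policy shown is an assumption):
 * @code
 * if (sli_fw_error_status(sli4) > 0) {
 *         // Request a reset with a dump so the failure can be analyzed
 *         sli_raise_ue(sli4, 1);
 *         if (sli_dump_is_ready(sli4) == 1) {
 *                 // ... retrieve the dump ...
 *         }
 * }
 * @endcode
 */
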
5837 /**
5838 * @ingroup sli
5839 * @brief Determine if the chip FW is in a ready state
5840 *
5841 * @param sli4 SLI context.
5842 *
5843 * @return
5844 * - 0 if call completed correctly and FW is not ready.
5845 * - 1 if call completed correctly and FW is ready.
5846 * - -1 if call failed.
5847 */
5848 int32_t
5849 sli_fw_ready(sli4_t *sli4)
5850 {
5851 uint32_t val;
5852 int32_t rc = -1;
5853
5854 /*
5855 * Is firmware ready for operation? Check needed depends on IF_TYPE
5856 */
5857 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
5858 SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) {
5859 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
5860 rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY ==
5861 SLI4_PORT_SEMAPHORE_PORT(val)) &&
5862 (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0);
5863 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type ||
5864 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) {
5865 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
5866 rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0);
5867 }
5868 return rc;
5869 }
5870
5871 /**
5872 * @ingroup sli
5873 * @brief Determine if the link can be configured
5874 *
5875 * @param sli4 SLI context.
5876 *
5877 * @return
5878 * - 0 if link is not configurable.
5879 * - 1 if link is configurable.
5880 */
5881 int32_t sli_link_is_configurable(sli4_t *sli)
5882 {
5883 int32_t rc = 0;
5884 /*
5885 * Link config works on: Skyhawk and Lancer
5886 * Link config does not work on: LancerG6
5887 */
5888
5889 switch (sli_get_asic_type(sli)) {
5890 case SLI4_ASIC_TYPE_SKYHAWK:
5891 case SLI4_ASIC_TYPE_LANCER:
5892 case SLI4_ASIC_TYPE_CORSAIR:
5893 rc = 1;
5894 break;
5895 case SLI4_ASIC_TYPE_LANCERG6:
5896 case SLI4_ASIC_TYPE_LANCERG7:
5897 case SLI4_ASIC_TYPE_BE3:
5898 default:
5899 rc = 0;
5900 break;
5901 }
5902
5903 return rc;
5904
5905 }
5906
5907 /* vim: set noexpandtab textwidth=120: */
5908
5909 /**
5910 * @ingroup sli_fc
5911 * @brief Write an FCOE_WQ_CREATE command.
5912 *
5913 * @param sli4 SLI context.
5914 * @param buf Destination buffer for the command.
5915 * @param size Buffer size, in bytes.
5916 * @param qmem DMA memory for the queue.
5917 * @param cq_id Associated CQ_ID.
5918 * @param ulp The ULP to bind
5919 *
5920 * @note This creates a Version 0 message.
5921 *
5922 * @return Returns the number of bytes written.
5923 */
5924 int32_t
5925 sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size,
5926 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp)
5927 {
5928 sli4_req_fcoe_wq_create_t *wq = NULL;
5929 uint32_t sli_config_off = 0;
5930 uint32_t p;
5931 uintptr_t addr;
5932
5933 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
5934 uint32_t payload_size;
5935
5936 /* Payload length must accommodate both request and response */
5937 payload_size = max(sizeof(sli4_req_fcoe_wq_create_t),
5938 sizeof(sli4_res_common_create_queue_t));
5939
5940 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
5941 NULL);
5942 }
5943 wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off);
5944
5945 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
5946 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
5947 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) -
5948 sizeof(sli4_req_hdr_t);
5949 /* valid values for number of pages: 1-4 (sec 4.5.1) */
5950 wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
5951 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) {
5952 return 0;
5953 }
5954
5955 wq->cq_id = cq_id;
5956
5957 if (sli4->config.dual_ulp_capable) {
5958 wq->dua = 1;
5959 wq->bqu = 1;
5960 wq->ulp = ulp;
5961 }
5962
5963 for (p = 0, addr = qmem->phys;
5964 p < wq->num_pages;
5965 p++, addr += SLI_PAGE_SIZE) {
5966 wq->page_physical_address[p].low = ocs_addr32_lo(addr);
5967 wq->page_physical_address[p].high = ocs_addr32_hi(addr);
5968 }
5969
5970 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t));
5971 }
5972
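/**
 * @par Example
 * A minimal sketch of issuing this command through the bootstrap mailbox
 * (illustrative only); wq_dma, cq_id, and ulp are assumed caller state, and
 * the sli4->bmbx buffer with SLI4_BMBX_SIZE follows this driver's usual
 * mailbox path:
 * @code
 * if (sli_cmd_fcoe_wq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                            &wq_dma, cq_id, ulp)) {
 *         if (sli_bmbx_command(sli4)) {
 *                 ocs_log_err(sli4->os, "FCOE_WQ_CREATE failed\n");
 *         }
 * }
 * @endcode
 */
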
5973 /**
5974 * @ingroup sli_fc
5975 * @brief Write an FCOE_WQ_CREATE_V1 command.
5976 *
5977 * @param sli4 SLI context.
5978 * @param buf Destination buffer for the command.
5979 * @param size Buffer size, in bytes.
5980 * @param qmem DMA memory for the queue.
5981 * @param cq_id Associated CQ_ID.
5982 * @param ignored This parameter carries the ULP for the WQ (ignored for V1).
5983 *
5984 * @note This creates a Version 1 message.
5985 * @return Returns the number of bytes written.
5986 */
5987 int32_t
5988 sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
5989 uint16_t cq_id, uint16_t ignored)
5990 {
5991 sli4_req_fcoe_wq_create_v1_t *wq = NULL;
5992 uint32_t sli_config_off = 0;
5993 uint32_t p;
5994 uintptr_t addr;
5995 uint32_t page_size = 0;
5996 uint32_t page_bytes = 0;
5997 uint32_t n_wqe = 0;
5998
5999 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6000 uint32_t payload_size;
6001
6002 /* Payload length must accommodate both request and response */
6003 payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t),
6004 sizeof(sli4_res_common_create_queue_t));
6005
6006 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6007 NULL);
6008 }
6009 wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off);
6010
6011 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
6012 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6013 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) -
6014 sizeof(sli4_req_hdr_t);
6015 wq->hdr.version = 1;
6016
6017 n_wqe = qmem->size / sli4->config.wqe_size;
6018
6019 /* This heuristic to determine the page size is simplistic
6020 * but could be made more sophisticated
6021 */
6022 switch (qmem->size) {
6023 case 4096:
6024 case 8192:
6025 case 16384:
6026 case 32768:
6027 page_size = 1;
6028 break;
6029 case 65536:
6030 page_size = 2;
6031 break;
6032 case 131072:
6033 page_size = 4;
6034 break;
6035 case 262144:
6036 page_size = 8;
6037 break;
6038 case 524288:
6039 page_size = 10;
6040 break;
6041 default:
6042 return 0;
6043 }
6044 page_bytes = page_size * SLI_PAGE_SIZE;
6045
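	/*
	 * Worked example (assuming the usual 4 KB SLI_PAGE_SIZE): a 131072-byte
	 * qmem selects page_size 4 above, so page_bytes is 16384 and the
	 * num_pages calculation below yields 131072 / 16384 = 8 pages.
	 */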
6046 /* valid values for number of pages: 1-8 */
6047 wq->num_pages = sli_page_count(qmem->size, page_bytes);
6048 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) {
6049 return 0;
6050 }
6051
6052 wq->cq_id = cq_id;
6053
6054 wq->page_size = page_size;
6055
6056 if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) {
6057 wq->wqe_size = SLI4_WQE_EXT_SIZE;
6058 } else {
6059 wq->wqe_size = SLI4_WQE_SIZE;
6060 }
6061
6062 wq->wqe_count = n_wqe;
6063
6064 for (p = 0, addr = qmem->phys;
6065 p < wq->num_pages;
6066 p++, addr += page_bytes) {
6067 wq->page_physical_address[p].low = ocs_addr32_lo(addr);
6068 wq->page_physical_address[p].high = ocs_addr32_hi(addr);
6069 }
6070
6071 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t));
6072 }
6073
6074 /**
6075 * @ingroup sli_fc
6076 * @brief Write an FCOE_WQ_DESTROY command.
6077 *
6078 * @param sli4 SLI context.
6079 * @param buf Destination buffer for the command.
6080 * @param size Buffer size, in bytes.
6081 * @param wq_id WQ_ID.
6082 *
6083 * @return Returns the number of bytes written.
6084 */
6085 int32_t
6086 sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id)
6087 {
6088 sli4_req_fcoe_wq_destroy_t *wq = NULL;
6089 uint32_t sli_config_off = 0;
6090
6091 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6092 uint32_t payload_size;
6093
6094 /* Payload length must accommodate both request and response */
6095 payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t),
6096 sizeof(sli4_res_hdr_t));
6097
6098 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6099 NULL);
6100 }
6101 wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off);
6102
6103 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY;
6104 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6105 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) -
6106 sizeof(sli4_req_hdr_t);
6107
6108 wq->wq_id = wq_id;
6109
6110 return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t));
6111 }
6112
6113 /**
6114 * @ingroup sli_fc
6115 * @brief Write an FCOE_POST_SGL_PAGES command.
6116 *
6117 * @param sli4 SLI context.
6118 * @param buf Destination buffer for the command.
6119 * @param size Buffer size, in bytes.
6120 * @param xri Starting XRI.
6121 * @param xri_count Number of XRIs to post.
6122 * @param page0 First SGL memory page.
6123 * @param page1 Second SGL memory page (optional).
6124 * @param dma DMA buffer for a non-embedded mailbox command (optional).
6125 *
6126 * @note If a non-embedded mailbox command is used, the DMA buffer must be at least (32 + xri_count * 16) bytes in length.
6127 *
6128 * @return Returns the number of bytes written.
6129 */
6130 int32_t
6131 sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size,
6132 uint16_t xri, uint32_t xri_count, ocs_dma_t *page0[], ocs_dma_t *page1[], ocs_dma_t *dma)
6133 {
6134 sli4_req_fcoe_post_sgl_pages_t *post = NULL;
6135 uint32_t sli_config_off = 0;
6136 uint32_t i;
6137
6138 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6139 uint32_t payload_size;
6140
6141 /* Payload length must accommodate both request and response */
6142 payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t),
6143 sizeof(sli4_res_hdr_t));
6144
6145 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6146 dma);
6147 }
6148 if (dma) {
6149 post = dma->virt;
6150 ocs_memset(post, 0, dma->size);
6151 } else {
6152 post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf + sli_config_off);
6153 }
6154
6155 post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES;
6156 post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6157 /* payload size calculation
6158 * 4 = xri_start + xri_count
6159 * xri_count = # of XRI's registered
6160 * sizeof(uint64_t) = physical address size
6161 * 2 = # of physical addresses per page set
6162 */
6163 post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2));
6164
6165 post->xri_start = xri;
6166 post->xri_count = xri_count;
6167
6168 for (i = 0; i < xri_count; i++) {
6169 post->page_set[i].page0_low = ocs_addr32_lo(page0[i]->phys);
6170 post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys);
6171 }
6172
6173 if (page1) {
6174 for (i = 0; i < xri_count; i++) {
6175 post->page_set[i].page1_low = ocs_addr32_lo(page1[i]->phys);
6176 post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys);
6177 }
6178 }
6179
6180 return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t));
6181 }
6182
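/**
 * @par Example
 * A hedged sketch of sizing the optional non-embedded buffer (illustrative
 * only); buf, size, xri, xri_count, page0, and page1 are assumed caller
 * state, and the 32-byte header plus 16 bytes per XRI follows the note above:
 * @code
 * ocs_dma_t sgl_dma;
 * size_t len = 32 + (xri_count * 16);
 *
 * if (ocs_dma_alloc(sli4->os, &sgl_dma, len, 4) == 0) {
 *         sli_cmd_fcoe_post_sgl_pages(sli4, buf, size, xri, xri_count,
 *                                     page0, page1, &sgl_dma);
 * }
 * @endcode
 */
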
6183 /**
6184 * @ingroup sli_fc
6185 * @brief Write an FCOE_RQ_CREATE command.
6186 *
6187 * @param sli4 SLI context.
6188 * @param buf Destination buffer for the command.
6189 * @param size Buffer size, in bytes.
6190 * @param qmem DMA memory for the queue.
6191 * @param cq_id Associated CQ_ID.
6192 * @param ulp This parameter carries the ULP for the RQ
6193 * @param buffer_size Buffer size pointed to by each RQE.
6194 *
6195 * @note This creates a Version 0 message.
6196 *
6197 * @return Returns the number of bytes written.
6198 */
6199 int32_t
6200 sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size,
6201 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, uint16_t buffer_size)
6202 {
6203 sli4_req_fcoe_rq_create_t *rq = NULL;
6204 uint32_t sli_config_off = 0;
6205 uint32_t p;
6206 uintptr_t addr;
6207
6208 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6209 uint32_t payload_size;
6210
6211 /* Payload length must accommodate both request and response */
6212 payload_size = max(sizeof(sli4_req_fcoe_rq_create_t),
6213 sizeof(sli4_res_common_create_queue_t));
6214
6215 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6216 NULL);
6217 }
6218 rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off);
6219
6220 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
6221 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6222 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) -
6223 sizeof(sli4_req_hdr_t);
6224 /* valid values for number of pages: 1-8 (sec 4.5.6) */
6225 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
6226 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) {
6227 ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages);
6228 return 0;
6229 }
6230
6231 /*
6232 * RQE count is the log base 2 of the total number of entries
6233 */
6234 rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE);
6235
6236 if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) ||
6237 (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) {
6238 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
6239 buffer_size,
6240 SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE,
6241 SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE);
6242 return -1;
6243 }
6244 rq->buffer_size = buffer_size;
6245
6246 rq->cq_id = cq_id;
6247
6248 if (sli4->config.dual_ulp_capable) {
6249 rq->dua = 1;
6250 rq->bqu = 1;
6251 rq->ulp = ulp;
6252 }
6253
6254 for (p = 0, addr = qmem->phys;
6255 p < rq->num_pages;
6256 p++, addr += SLI_PAGE_SIZE) {
6257 rq->page_physical_address[p].low = ocs_addr32_lo(addr);
6258 rq->page_physical_address[p].high = ocs_addr32_hi(addr);
6259 }
6260
6261 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t));
6262 }
6263
6264 /**
6265 * @ingroup sli_fc
6266 * @brief Write an FCOE_RQ_CREATE_V1 command.
6267 *
6268 * @param sli4 SLI context.
6269 * @param buf Destination buffer for the command.
6270 * @param size Buffer size, in bytes.
6271 * @param qmem DMA memory for the queue.
6272 * @param cq_id Associated CQ_ID.
6273 * @param ulp This parameter carries the ULP for RQ (ignored for V1)
6274 * @param buffer_size Buffer size pointed to by each RQE.
6275 *
6276 * @note This creates a Version 1 message.
6277 *
6278 * @return Returns the number of bytes written.
6279 */
6280 int32_t
6281 sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size,
6282 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
6283 uint16_t buffer_size)
6284 {
6285 sli4_req_fcoe_rq_create_v1_t *rq = NULL;
6286 uint32_t sli_config_off = 0;
6287 uint32_t p;
6288 uintptr_t addr;
6289
6290 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6291 uint32_t payload_size;
6292
6293 /* Payload length must accommodate both request and response */
6294 payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t),
6295 sizeof(sli4_res_common_create_queue_t));
6296
6297 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6298 NULL);
6299 }
6300 rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off);
6301
6302 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
6303 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6304 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) -
6305 sizeof(sli4_req_hdr_t);
6306 rq->hdr.version = 1;
6307
6308 /* Disable "no buffer warnings" to avoid Lancer bug */
6309 rq->dnb = TRUE;
6310
6311 /* valid values for number of pages: 1-8 (sec 4.5.6) */
6312 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
6313 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) {
6314 ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n",
6315 rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES);
6316 return 0;
6317 }
6318
6319 /*
6320 * RQE count is the total number of entries (note not lg2(# entries))
6321 */
6322 rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE;
6323
6324 rq->rqe_size = SLI4_FCOE_RQE_SIZE_8;
6325
6326 rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
6327
6328 if ((buffer_size < sli4->config.rq_min_buf_size) ||
6329 (buffer_size > sli4->config.rq_max_buf_size)) {
6330 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
6331 buffer_size,
6332 sli4->config.rq_min_buf_size,
6333 sli4->config.rq_max_buf_size);
6334 return -1;
6335 }
6336 rq->buffer_size = buffer_size;
6337
6338 rq->cq_id = cq_id;
6339
6340 for (p = 0, addr = qmem->phys;
6341 p < rq->num_pages;
6342 p++, addr += SLI_PAGE_SIZE) {
6343 rq->page_physical_address[p].low = ocs_addr32_lo(addr);
6344 rq->page_physical_address[p].high = ocs_addr32_hi(addr);
6345 }
6346
6347 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t));
6348 }
6349
6350 /**
6351 * @ingroup sli_fc
6352 * @brief Write an FCOE_RQ_DESTROY command.
6353 *
6354 * @param sli4 SLI context.
6355 * @param buf Destination buffer for the command.
6356 * @param size Buffer size, in bytes.
6357 * @param rq_id RQ_ID.
6358 *
6359 * @return Returns the number of bytes written.
6360 */
6361 int32_t
6362 sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id)
6363 {
6364 sli4_req_fcoe_rq_destroy_t *rq = NULL;
6365 uint32_t sli_config_off = 0;
6366
6367 if (SLI4_PORT_TYPE_FC == sli4->port_type) {
6368 uint32_t payload_size;
6369
6370 /* Payload length must accommodate both request and response */
6371 payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t),
6372 sizeof(sli4_res_hdr_t));
6373
6374 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
6375 NULL);
6376 }
6377 rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off);
6378
6379 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY;
6380 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6381 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) -
6382 sizeof(sli4_req_hdr_t);
6383
6384 rq->rq_id = rq_id;
6385
6386 return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t));
6387 }
6388
6389 /**
6390 * @ingroup sli_fc
6391 * @brief Write an FCOE_READ_FCF_TABLE command.
6392 *
6393 * @note
6394 * The response of this command exceeds the size of an embedded
6395 * command and requires an external buffer with DMA capability to hold the results.
6396 * The caller should allocate the ocs_dma_t structure / memory.
6397 *
6398 * @param sli4 SLI context.
6399 * @param buf Destination buffer for the command.
6400 * @param size Buffer size, in bytes.
6401 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
6402 * @param index FCF table index to retrieve.
6403 *
6404 * @return Returns the number of bytes written.
6405 */
6406 int32_t
6407 sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t index)
6408 {
6409 sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL;
6410
6411 if (SLI4_PORT_TYPE_FC != sli4->port_type) {
6412 ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n");
6413 return -1;
6414 }
6415
6416 read_fcf = dma->virt;
6417
6418 ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t));
6419
6420 read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE;
6421 read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6422 read_fcf->hdr.request_length = dma->size -
6423 sizeof(sli4_req_fcoe_read_fcf_table_t);
6424 read_fcf->fcf_index = index;
6425
6426 return sli_cmd_sli_config(sli4, buf, size, 0, dma);
6427 }
6428
6429 /**
6430 * @ingroup sli_fc
6431 * @brief Write an FCOE_POST_HDR_TEMPLATES command.
6432 *
6433 * @param sli4 SLI context.
6434 * @param buf Destination buffer for the command.
6435 * @param size Buffer size, in bytes.
6436 * @param dma Pointer to DMA memory structure. This is allocated by the caller.
6437 * @param rpi Starting RPI index for the header templates.
6438 * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts.
6439 *
6440 * @return Returns the number of bytes written.
6441 */
6442 int32_t
6443 sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size,
6444 ocs_dma_t *dma, uint16_t rpi, ocs_dma_t *payload_dma)
6445 {
6446 sli4_req_fcoe_post_hdr_templates_t *template = NULL;
6447 uint32_t sli_config_off = 0;
6448 uintptr_t phys = 0;
6449 uint32_t i = 0;
6450 uint32_t page_count;
6451 uint32_t payload_size;
6452
6453 page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
6454
6455 payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) +
6456 page_count * sizeof(sli4_physical_page_descriptor_t);
6457
6458 if (page_count > 16) {
6459 /* We can't fit more than 16 descriptors into an embedded mailbox
6460 command; it has to be non-embedded */
6461 if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) {
6462 ocs_log_err(sli4->os, "mailbox payload memory allocation fail\n");
6463 return 0;
6464 }
6465 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma);
6466 template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt;
6467 } else {
6468 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL);
6469 template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off);
6470 }
6471
6472 if (UINT16_MAX == rpi) {
6473 rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0];
6474 }
6475
6476 template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES;
6477 template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6478 template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) -
6479 sizeof(sli4_req_hdr_t);
6480
6481 template->rpi_offset = rpi;
6482 template->page_count = page_count;
6483 phys = dma->phys;
6484 for (i = 0; i < template->page_count; i++) {
6485 template->page_descriptor[i].low = ocs_addr32_lo(phys);
6486 template->page_descriptor[i].high = ocs_addr32_hi(phys);
6487
6488 phys += SLI_PAGE_SIZE;
6489 }
6490
6491 return(sli_config_off + payload_size);
6492 }
6493
6494 int32_t
6495 sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index)
6496 {
6497 sli4_req_fcoe_rediscover_fcf_t *redisc = NULL;
6498 uint32_t sli_config_off = 0;
6499
6500 sli_config_off = sli_cmd_sli_config(sli4, buf, size,
6501 sizeof(sli4_req_fcoe_rediscover_fcf_t),
6502 NULL);
6503
6504 redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off);
6505
6506 redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF;
6507 redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
6508 redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) -
6509 sizeof(sli4_req_hdr_t);
6510
6511 if (index == UINT16_MAX) {
6512 redisc->fcf_count = 0;
6513 } else {
6514 redisc->fcf_count = 1;
6515 redisc->fcf_index[0] = index;
6516 }
6517
6518 return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t));
6519 }
6520
6521 /**
6522 * @ingroup sli_fc
6523 * @brief Write an ABORT_WQE work queue entry.
6524 *
6525 * @param sli4 SLI context.
6526 * @param buf Destination buffer for the WQE.
6527 * @param size Buffer size, in bytes.
6528 * @param type Abort type, such as XRI, abort tag, and request tag.
6529 * @param send_abts Boolean to cause the hardware to automatically generate an ABTS.
6530 * @param ids ID of IOs to abort.
6531 * @param mask Mask applied to the ID values to abort.
6532 * @param tag Tag value associated with this abort.
6533 * @param cq_id The id of the completion queue where the WQE response is sent.
6536 *
6537 * @return Returns 0 on success, or a non-zero value on failure.
6538 */
6539 int32_t
6540 sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type, uint32_t send_abts,
6541 uint32_t ids, uint32_t mask, uint16_t tag, uint16_t cq_id)
6542 {
6543 sli4_abort_wqe_t *abort = buf;
6544
6545 ocs_memset(buf, 0, size);
6546
6547 switch (type) {
6548 case SLI_ABORT_XRI:
6549 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
6550 if (mask) {
6551 ocs_log_warn(sli4->os, "warning non-zero mask %#x when aborting XRI %#x\n", mask, ids);
6552 mask = 0;
6553 }
6554 break;
6555 case SLI_ABORT_ABORT_ID:
6556 abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
6557 break;
6558 case SLI_ABORT_REQUEST_ID:
6559 abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
6560 break;
6561 default:
6562 ocs_log_test(sli4->os, "unsupported type %#x\n", type);
6563 return -1;
6564 }
6565
6566 abort->ia = send_abts ? 0 : 1;
6567
6568 /* Suppress ABTS retries */
6569 abort->ir = 1;
6570
6571 abort->t_mask = mask;
6572 abort->t_tag = ids;
6573 abort->command = SLI4_WQE_ABORT;
6574 abort->request_tag = tag;
6575 abort->qosd = TRUE;
6576 abort->cq_id = cq_id;
6577 abort->cmd_type = SLI4_CMD_ABORT_WQE;
6578
6579 return 0;
6580 }
6581
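/**
 * @par Example
 * A minimal sketch of aborting an outstanding exchange by XRI (illustrative
 * only); io_xri, abort_tag, and cq_id are assumed caller state, and sizing
 * the buffer with SLI4_WQE_EXT_BYTES is an assumption for the largest WQE:
 * @code
 * uint8_t wqe[SLI4_WQE_EXT_BYTES] = { 0 };
 *
 * if (sli_abort_wqe(sli4, wqe, sizeof(wqe), SLI_ABORT_XRI, FALSE,
 *                   io_xri, 0, abort_tag, cq_id) == 0) {
 *         // ... post wqe to the work queue (e.g. via sli_queue_write()) ...
 * }
 * @endcode
 */
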
6582 /**
6583 * @ingroup sli_fc
6584 * @brief Write an ELS_REQUEST64_WQE work queue entry.
6585 *
6586 * @param sli4 SLI context.
6587 * @param buf Destination buffer for the WQE.
6588 * @param size Buffer size, in bytes.
6589 * @param sgl DMA memory for the ELS request.
6590 * @param req_type ELS request type.
6591 * @param req_len Length of ELS request in bytes.
6592 * @param max_rsp_len Max length of ELS response in bytes.
6593 * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV.
6594 * @param xri XRI for this exchange.
6595 * @param tag IO tag value.
6596 * @param cq_id The id of the completion queue where the WQE response is sent.
6597 * @param rnode Destination of ELS request (that is, the remote node).
6598 *
6599 * @return Returns 0 on success, or a non-zero value on failure.
6600 */
6601 int32_t
6602 sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint8_t req_type,
6603 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
6604 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode)
6605 {
6606 sli4_els_request64_wqe_t *els = buf;
6607 sli4_sge_t *sge = sgl->virt;
6608 uint8_t is_fabric = FALSE;
6609
6610 ocs_memset(buf, 0, size);
6611
6612 if (sli4->config.sgl_pre_registered) {
6613 els->xbl = FALSE;
6614
6615 els->dbde = TRUE;
6616 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
6617
6618 els->els_request_payload.buffer_length = req_len;
6619 els->els_request_payload.u.data.buffer_address_low = sge[0].buffer_address_low;
6620 els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high;
6621 } else {
6622 els->xbl = TRUE;
6623
6624 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP;
6625
6626 els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t);
6627 els->els_request_payload.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6628 els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6629 }
6630
6631 els->els_request_payload_length = req_len;
6632 els->max_response_payload_length = max_rsp_len;
6633
6634 els->xri_tag = xri;
6635 els->timer = timeout;
6636 els->class = SLI4_ELS_REQUEST64_CLASS_3;
6637
6638 els->command = SLI4_WQE_ELS_REQUEST64;
6639
6640 els->request_tag = tag;
6641
6642 if (rnode->node_group) {
6643 els->hlm = TRUE;
6644 els->remote_id = rnode->fc_id & 0x00ffffff;
6645 }
6646
6647 els->iod = SLI4_ELS_REQUEST64_DIR_READ;
6648
6649 els->qosd = TRUE;
6650
6651 /* figure out the ELS_ID value from the request buffer */
6652
6653 switch (req_type) {
6654 case FC_ELS_CMD_LOGO:
6655 els->els_id = SLI4_ELS_REQUEST64_LOGO;
6656 if (rnode->attached) {
6657 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6658 els->context_tag = rnode->indicator;
6659 } else {
6660 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6661 els->context_tag = rnode->sport->indicator;
6662 }
6663 if (FC_ADDR_FABRIC == rnode->fc_id) {
6664 is_fabric = TRUE;
6665 }
6666 break;
6667 case FC_ELS_CMD_FDISC:
6668 if (FC_ADDR_FABRIC == rnode->fc_id) {
6669 is_fabric = TRUE;
6670 }
6671 if (0 == rnode->sport->fc_id) {
6672 els->els_id = SLI4_ELS_REQUEST64_FDISC;
6673 is_fabric = TRUE;
6674 } else {
6675 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6676 }
6677 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6678 els->context_tag = rnode->sport->indicator;
6679 els->sp = TRUE;
6680 break;
6681 case FC_ELS_CMD_FLOGI:
6682 els->els_id = SLI4_ELS_REQUEST64_FLOGIN;
6683 is_fabric = TRUE;
6684 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
6685 if (!rnode->sport->domain) {
6686 ocs_log_test(sli4->os, "invalid domain handle\n");
6687 return -1;
6688 }
6689 /*
6690 * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the
6691 * FCFI here
6692 */
6693 els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI;
6694 els->context_tag = rnode->sport->domain->fcf_indicator;
6695 els->sp = TRUE;
6696 } else {
6697 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6698 els->context_tag = rnode->sport->indicator;
6699
6700 /*
6701 * Set SP here ... we haven't done a REG_VPI yet
6702 * TODO: need to maybe not set this when we have
6703 * completed VFI/VPI registrations ...
6704 *
6705 * Use the FC_ID of the SPORT if it has been allocated, otherwise
6706 * use an S_ID of zero.
6707 */
6708 els->sp = TRUE;
6709 if (rnode->sport->fc_id != UINT32_MAX) {
6710 els->sid = rnode->sport->fc_id;
6711 }
6712 }
6713 break;
6714 case FC_ELS_CMD_PLOGI:
6715 els->els_id = SLI4_ELS_REQUEST64_PLOGI;
6716 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6717 els->context_tag = rnode->sport->indicator;
6718 break;
6719 case FC_ELS_CMD_SCR:
6720 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6721 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6722 els->context_tag = rnode->sport->indicator;
6723 break;
6724 default:
6725 els->els_id = SLI4_ELS_REQUEST64_OTHER;
6726 if (rnode->attached) {
6727 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6728 els->context_tag = rnode->indicator;
6729 } else {
6730 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
6731 els->context_tag = rnode->sport->indicator;
6732 }
6733 break;
6734 }
6735
6736 if (is_fabric) {
6737 els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC;
6738 } else {
6739 els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
6740 }
6741
6742 els->cq_id = cq_id;
6743
6744 if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) {
6745 els->remote_id = rnode->fc_id;
6746 }
6747 if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) {
6748 els->temporary_rpi = rnode->indicator;
6749 }
6750
6751 return 0;
6752 }
6753
6754 /**
6755 * @ingroup sli_fc
6756 * @brief Write an FCP_ICMND64_WQE work queue entry.
6757 *
6758 * @param sli4 SLI context.
6759 * @param buf Destination buffer for the WQE.
6760 * @param size Buffer size, in bytes.
6761 * @param sgl DMA memory for the scatter gather list.
6762 * @param xri XRI for this exchange.
6763 * @param tag IO tag value.
6764 * @param cq_id The id of the completion queue where the WQE response is sent.
6765 * @param rpi remote node indicator (RPI)
6766 * @param rnode Destination request (that is, the remote node).
6767 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6768 *
6769 * @return Returns 0 on success, or a non-zero value on failure.
6770 */
6771 int32_t
6772 sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
6773 uint16_t xri, uint16_t tag, uint16_t cq_id,
6774 uint32_t rpi, ocs_remote_node_t *rnode, uint8_t timeout)
6775 {
6776 sli4_fcp_icmnd64_wqe_t *icmnd = buf;
6777 sli4_sge_t *sge = NULL;
6778
6779 ocs_memset(buf, 0, size);
6780
6781 if (!sgl || !sgl->virt) {
6782 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6783 sgl, sgl ? sgl->virt : NULL);
6784 return -1;
6785 }
6786 sge = sgl->virt;
6787
6788 if (sli4->config.sgl_pre_registered) {
6789 icmnd->xbl = FALSE;
6790
6791 icmnd->dbde = TRUE;
6792 icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6793
6794 icmnd->bde.buffer_length = sge[0].buffer_length;
6795 icmnd->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6796 icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6797 } else {
6798 icmnd->xbl = TRUE;
6799
6800 icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP;
6801
6802 icmnd->bde.buffer_length = sgl->size;
6803 icmnd->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6804 icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6805 }
6806
6807 icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
6808 icmnd->xri_tag = xri;
6809 icmnd->context_tag = rpi;
6810 icmnd->timer = timeout;
6811
6812 icmnd->pu = 2; /* WQE word 4 contains read transfer length */
6813 icmnd->class = SLI4_ELS_REQUEST64_CLASS_3;
6814 icmnd->command = SLI4_WQE_FCP_ICMND64;
6815 icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6816
6817 icmnd->abort_tag = xri;
6818
6819 icmnd->request_tag = tag;
6820 icmnd->len_loc = 3;
6821 if (rnode->node_group) {
6822 icmnd->hlm = TRUE;
6823 icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff;
6824 }
6825 if (((ocs_node_t *)rnode->node)->fcp2device) {
6826 icmnd->erp = TRUE;
6827 }
6828 icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE;
6829 icmnd->cq_id = cq_id;
6830
6831 return 0;
6832 }
6833
6834 /**
6835 * @ingroup sli_fc
6836 * @brief Write an FCP_IREAD64_WQE work queue entry.
6837 *
6838 * @param sli4 SLI context.
6839 * @param buf Destination buffer for the WQE.
6840 * @param size Buffer size, in bytes.
6841 * @param sgl DMA memory for the scatter gather list.
6842 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
6843 * @param xfer_len Data transfer length.
6844 * @param xri XRI for this exchange.
6845 * @param tag IO tag value.
6846 * @param cq_id The id of the completion queue where the WQE response is sent.
6847 * @param rpi remote node indicator (RPI)
6848 * @param rnode Destination request (i.e. remote node).
6849 * @param dif T10 DIF operation, or 0 to disable.
6850 * @param bs T10 DIF block size, or 0 if DIF is disabled.
6851 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6852 *
6853 * @return Returns 0 on success, or a non-zero value on failure.
6854 */
6855 int32_t
6856 sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
6857 uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
6858 uint32_t rpi, ocs_remote_node_t *rnode,
6859 uint8_t dif, uint8_t bs, uint8_t timeout)
6860 {
6861 sli4_fcp_iread64_wqe_t *iread = buf;
6862 sli4_sge_t *sge = NULL;
6863
6864 ocs_memset(buf, 0, size);
6865
6866 if (!sgl || !sgl->virt) {
6867 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6868 sgl, sgl ? sgl->virt : NULL);
6869 return -1;
6870 }
6871 sge = sgl->virt;
6872
6873 if (sli4->config.sgl_pre_registered) {
6874 iread->xbl = FALSE;
6875
6876 iread->dbde = TRUE;
6877 iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6878
6879 iread->bde.buffer_length = sge[0].buffer_length;
6880 iread->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6881 iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6882 } else {
6883 iread->xbl = TRUE;
6884
6885 iread->bde.bde_type = SLI4_BDE_TYPE_BLP;
6886
6887 iread->bde.buffer_length = sgl->size;
6888 iread->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6889 iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6890
6891 /* fill out fcp_cmnd buffer len and change resp buffer to be of type
6892 * "skip" (note: response will still be written to sge[1] if necessary) */
6893 iread->fcp_cmd_buffer_length = sge[0].buffer_length;
6894 sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
6895 }
6896
6897 iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
6898 iread->total_transfer_length = xfer_len;
6899
6900 iread->xri_tag = xri;
6901 iread->context_tag = rpi;
6902
6903 iread->timer = timeout;
6904
6905 iread->pu = 2; /* WQE word 4 contains read transfer length */
6906 iread->class = SLI4_ELS_REQUEST64_CLASS_3;
6907 iread->command = SLI4_WQE_FCP_IREAD64;
6908 iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
6909 iread->dif = dif;
6910 iread->bs = bs;
6911
6912 iread->abort_tag = xri;
6913
6914 iread->request_tag = tag;
6915 iread->len_loc = 3;
6916 if (rnode->node_group) {
6917 iread->hlm = TRUE;
6918 iread->remote_n_port_id = rnode->fc_id & 0x00ffffff;
6919 }
6920 if (((ocs_node_t *)rnode->node)->fcp2device) {
6921 iread->erp = TRUE;
6922 }
6923 iread->iod = 1;
6924 iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE;
6925 iread->cq_id = cq_id;
6926
6927 if (sli4->config.perf_hint) {
6928 iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6929 iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
6930 iread->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
6931 iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
6932 }
6933
6934 return 0;
6935 }
6936
6937 /**
6938 * @ingroup sli_fc
6939 * @brief Write an FCP_IWRITE64_WQE work queue entry.
6940 *
6941 * @param sli4 SLI context.
6942 * @param buf Destination buffer for the WQE.
6943 * @param size Buffer size, in bytes.
6944 * @param sgl DMA memory for the scatter gather list.
6945 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
6946 * @param xfer_len Data transfer length.
6947 * @param first_burst The number of first burst bytes
6948 * @param xri XRI for this exchange.
6949 * @param tag IO tag value.
6950 * @param cq_id The id of the completion queue where the WQE response is sent.
6951 * @param rpi remote node indicator (RPI)
6952 * @param rnode Destination request (i.e. remote node)
6953 * @param dif T10 DIF operation, or 0 to disable
6954 * @param bs T10 DIF block size, or 0 if DIF is disabled
6955 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
6956 *
6957 * @return Returns 0 on success, or a non-zero value on failure.
6958 */
6959 int32_t
6960 sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
6961 uint32_t xfer_len, uint32_t first_burst, uint16_t xri, uint16_t tag, uint16_t cq_id,
6962 uint32_t rpi, ocs_remote_node_t *rnode,
6963 uint8_t dif, uint8_t bs, uint8_t timeout)
6964 {
6965 sli4_fcp_iwrite64_wqe_t *iwrite = buf;
6966 sli4_sge_t *sge = NULL;
6967
6968 ocs_memset(buf, 0, size);
6969
6970 if (!sgl || !sgl->virt) {
6971 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
6972 sgl, sgl ? sgl->virt : NULL);
6973 return -1;
6974 }
6975 sge = sgl->virt;
6976
6977 if (sli4->config.sgl_pre_registered) {
6978 iwrite->xbl = FALSE;
6979
6980 iwrite->dbde = TRUE;
6981 iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
6982
6983 iwrite->bde.buffer_length = sge[0].buffer_length;
6984 iwrite->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
6985 iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
6986 } else {
6987 iwrite->xbl = TRUE;
6988
6989 iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP;
6990
6991 iwrite->bde.buffer_length = sgl->size;
6992 iwrite->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
6993 iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
6994
6995 /* fill out fcp_cmnd buffer len and change resp buffer to be of type
6996 * "skip" (note: response will still be written to sge[1] if necessary) */
6997 iwrite->fcp_cmd_buffer_length = sge[0].buffer_length;
6998 sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
6999 }
7000
7001 iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
7002 iwrite->total_transfer_length = xfer_len;
7003 iwrite->initial_transfer_length = MIN(xfer_len, first_burst);
7004
7005 iwrite->xri_tag = xri;
7006 iwrite->context_tag = rpi;
7007
7008 iwrite->timer = timeout;
7009
7010 iwrite->pu = 2; /* WQE word 4 contains read transfer length */
7011 iwrite->class = SLI4_ELS_REQUEST64_CLASS_3;
7012 iwrite->command = SLI4_WQE_FCP_IWRITE64;
7013 iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7014 iwrite->dif = dif;
7015 iwrite->bs = bs;
7016
7017 iwrite->abort_tag = xri;
7018
7019 iwrite->request_tag = tag;
7020 iwrite->len_loc = 3;
7021 if (rnode->node_group) {
7022 iwrite->hlm = TRUE;
7023 iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7024 }
7025 if (((ocs_node_t *)rnode->node)->fcp2device) {
7026 iwrite->erp = TRUE;
7027 }
7028 iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE;
7029 iwrite->cq_id = cq_id;
7030
7031 if (sli4->config.perf_hint) {
7032 iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7033 iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7034 iwrite->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7035 iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7036 }
7037
7038 return 0;
7039 }
7040
7041 /**
7042 * @ingroup sli_fc
7043 * @brief Write an FCP_TRECEIVE64_WQE work queue entry.
7044 *
7045 * @param sli4 SLI context.
7046 * @param buf Destination buffer for the WQE.
7047 * @param size Buffer size, in bytes.
7048 * @param sgl DMA memory for the Scatter-Gather List.
7049 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7050 * @param relative_off Relative offset of the IO (if any).
7051 * @param xfer_len Data transfer length.
7052 * @param xri XRI for this exchange.
7053 * @param tag IO tag value.
7054 * @param xid OX_ID for the exchange.
7055 * @param cq_id The id of the completion queue where the WQE response is sent.
7056 * @param rpi remote node indicator (RPI)
7057 * @param rnode Destination request (i.e. remote node).
7058 * @param flags Optional attributes, including:
7059 * - ACTIVE - IO is already active.
7060 * - AUTO RSP - Automatically generate a good FCP_RSP.
7061 * @param dif T10 DIF operation, or 0 to disable.
7062 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7063 * @param csctl value of csctl field.
7064 * @param app_id value for VM application header.
7065 *
7066 * @return Returns 0 on success, or a non-zero value on failure.
7067 */
7068 int32_t
7069 sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7070 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
7071 uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs,
7072 uint8_t csctl, uint32_t app_id)
7073 {
7074 sli4_fcp_treceive64_wqe_t *trecv = buf;
7075 sli4_fcp_128byte_wqe_t *trecv_128 = buf;
7076 sli4_sge_t *sge = NULL;
7077
7078 ocs_memset(buf, 0, size);
7079
7080 if (!sgl || !sgl->virt) {
7081 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7082 sgl, sgl ? sgl->virt : NULL);
7083 return -1;
7084 }
7085 sge = sgl->virt;
7086
7087 if (sli4->config.sgl_pre_registered) {
7088 trecv->xbl = FALSE;
7089
7090 trecv->dbde = TRUE;
7091 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7092
7093 trecv->bde.buffer_length = sge[0].buffer_length;
7094 trecv->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7095 trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7096
7097 trecv->payload_offset_length = sge[0].buffer_length;
7098 } else {
7099 trecv->xbl = TRUE;
7100
7101 /* if data is a single physical address, use a BDE */
7102 if (!dif && (xfer_len <= sge[2].buffer_length)) {
7103 trecv->dbde = TRUE;
7104 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7105
7106 trecv->bde.buffer_length = sge[2].buffer_length;
7107 trecv->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7108 trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7109 } else {
7110 trecv->bde.bde_type = SLI4_BDE_TYPE_BLP;
7111 trecv->bde.buffer_length = sgl->size;
7112 trecv->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7113 trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7114 }
7115 }
7116
7117 trecv->relative_offset = relative_off;
7118
7119 if (flags & SLI4_IO_CONTINUATION) {
7120 trecv->xc = TRUE;
7121 }
7122 trecv->xri_tag = xri;
7123
7124 trecv->context_tag = rpi;
7125
7126 trecv->pu = TRUE; /* WQE uses relative offset */
7127
7128 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7129 trecv->ar = TRUE;
7130 }
7131
7132 trecv->command = SLI4_WQE_FCP_TRECEIVE64;
7133 trecv->class = SLI4_ELS_REQUEST64_CLASS_3;
7134 trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7135 trecv->dif = dif;
7136 trecv->bs = bs;
7137
7138 trecv->remote_xid = xid;
7139
7140 trecv->request_tag = tag;
7141
7142 trecv->iod = 1;
7143
7144 trecv->len_loc = 0x2;
7145
7146 if (rnode->node_group) {
7147 trecv->hlm = TRUE;
7148 trecv->dword5.dword = rnode->fc_id & 0x00ffffff;
7149 }
7150
7151 trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE;
7152
7153 trecv->cq_id = cq_id;
7154
7155 trecv->fcp_data_receive_length = xfer_len;
7156
7157 if (sli4->config.perf_hint) {
7158 trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7159 trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7160 trecv->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7161 trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7162 }
7163
7164 /* The upper 7 bits of csctl carry the priority */
7165 if (csctl & SLI4_MASK_CCP) {
7166 trecv->ccpe = 1;
7167 trecv->ccp = (csctl & SLI4_MASK_CCP);
7168 }
7169
7170 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trecv->eat) {
7171 trecv->app_id_valid = 1;
7172 trecv->wqes = 1;
7173 trecv_128->dw[31] = app_id;
7174 }
7175 return 0;
7176 }
7177
7178 /**
7179 * @ingroup sli_fc
7180 * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry.
7181 *
7182 * @param sli4 SLI context.
7183 * @param buf Destination buffer for the WQE.
7184 * @param size Buffer size, in bytes.
7185 * @param sgl DMA memory for the Scatter-Gather List.
7186 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7187 * @param relative_off Relative offset of the IO (if any).
7188 * @param xfer_len Data transfer length.
7189 * @param xri XRI for this exchange.
7190 * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround)
7191 * @param tag IO tag value.
7192 * @param xid OX_ID for the exchange.
7193 * @param cq_id The id of the completion queue where the WQE response is sent.
7194 * @param rpi remote node indicator (RPI)
7195 * @param rnode Destination request (i.e. remote node).
7196 * @param flags Optional attributes, including:
7197 * - ACTIVE - IO is already active.
7198 * - AUTO RSP - Automatically generate a good FCP_RSP.
7199 * @param dif T10 DIF operation, or 0 to disable.
7200 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7201 * @param csctl value of csctl field.
7202 * @param app_id value for VM application header.
7203 *
7204 * @return Returns 0 on success, or a non-zero value on failure.
7205 */
7206 int32_t
7207 sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7208 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag,
7209 uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags,
7210 uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
7211 {
7212 int32_t rc;
7213
7214 rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag,
7215 cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id);
7216 if (rc == 0) {
7217 sli4_fcp_treceive64_wqe_t *trecv = buf;
7218
7219 trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
7220 trecv->dword5.sec_xri_tag = sec_xri;
7221 }
7222 return rc;
7223 }
7224
7225 /**
7226 * @ingroup sli_fc
7227 * @brief Write an FCP_TRSP64_WQE work queue entry.
7228 *
7229 * @param sli4 SLI context.
7230 * @param buf Destination buffer for the WQE.
7231 * @param size Buffer size, in bytes.
7232 * @param sgl DMA memory for the Scatter-Gather List.
7233 * @param rsp_len Response data length.
7234 * @param xri XRI for this exchange.
7235 * @param tag IO tag value.
7236 * @param cq_id The id of the completion queue where the WQE response is sent.
7237 * @param xid OX_ID for the exchange.
7238 * @param rpi remote node indicator (RPI)
7239 * @param rnode Destination request (i.e. remote node).
7240 * @param flags Optional attributes, including:
7241 * - ACTIVE - IO is already active
7242 * - AUTO RSP - Automatically generate a good FCP_RSP.
7243 * @param csctl value of csctl field.
7244 * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0)
7245 * @param app_id value for VM application header.
7246 *
7247 * @return Returns 0 on success, or a non-zero value on failure.
7248 */
7249 int32_t
7250 sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len,
7251 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
7252 uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id)
7253 {
7254 sli4_fcp_trsp64_wqe_t *trsp = buf;
7255 sli4_fcp_128byte_wqe_t *trsp_128 = buf;
7256
7257 ocs_memset(buf, 0, size);
7258
7259 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7260 trsp->ag = TRUE;
7261 /*
7262 * The SLI-4 documentation states that the BDE is ignored when
7263 * using auto-good response, but, at least for IF_TYPE 0 devices,
7264 * this does not appear to be true.
7265 */
7266 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
7267 trsp->bde.buffer_length = 12; /* byte size of RSP */
7268 }
7269 } else {
7270 sli4_sge_t *sge = sgl->virt;
7271
7272 if (sli4->config.sgl_pre_registered || port_owned) {
7273 trsp->dbde = TRUE;
7274 } else {
7275 trsp->xbl = TRUE;
7276 }
7277
7278 trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7279 trsp->bde.buffer_length = sge[0].buffer_length;
7280 trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7281 trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7282
7283 trsp->fcp_response_length = rsp_len;
7284 }
7285
7286 if (flags & SLI4_IO_CONTINUATION) {
7287 trsp->xc = TRUE;
7288 }
7289
7290 if (rnode->node_group) {
7291 trsp->hlm = TRUE;
7292 trsp->dword5 = rnode->fc_id & 0x00ffffff;
7293 }
7294
7295 trsp->xri_tag = xri;
7296 trsp->rpi = rpi;
7297
7298 trsp->command = SLI4_WQE_FCP_TRSP64;
7299 trsp->class = SLI4_ELS_REQUEST64_CLASS_3;
7300
7301 trsp->remote_xid = xid;
7302 trsp->request_tag = tag;
7303 trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1);
7304 trsp->len_loc = 0x1;
7305 trsp->cq_id = cq_id;
7306 trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE;
7307
7308 	/* The upper 7 bits of csctl are the priority */
7309 if (csctl & SLI4_MASK_CCP) {
7310 trsp->ccpe = 1;
7311 trsp->ccp = (csctl & SLI4_MASK_CCP);
7312 }
7313
7314 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) {
7315 trsp->app_id_valid = 1;
7316 trsp->wqes = 1;
7317 trsp_128->dw[31] = app_id;
7318 }
7319 return 0;
7320 }
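
/*
 * Illustrative usage sketch (not part of the driver): building an FCP_TRSP64
 * WQE with an auto-generated good response.  The WQE buffer, tag, queue ID,
 * and exchange values are hypothetical placeholders owned by the caller;
 * the example is compile-guarded.
 */
#if 0
static int32_t
example_build_trsp(sli4_t *sli4, uint8_t *wqe, ocs_dma_t *sgl, uint16_t xri,
		   uint16_t tag, uint16_t cq_id, uint16_t ox_id, uint32_t rpi,
		   ocs_remote_node_t *rnode)
{
	/* With auto-good response, the response payload SGL path is not used */
	return sli_fcp_trsp64_wqe(sli4, wqe, sli4->config.wqe_size, sgl,
				  0, xri, tag, cq_id, ox_id, rpi, rnode,
				  SLI4_IO_AUTO_GOOD_RESPONSE, 0, 0, 0);
}
#endif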
7321
7322 /**
7323 * @ingroup sli_fc
7324 * @brief Write an FCP_TSEND64_WQE work queue entry.
7325 *
7326 * @param sli4 SLI context.
7327 * @param buf Destination buffer for the WQE.
7328 * @param size Buffer size, in bytes.
7329 * @param sgl DMA memory for the scatter gather list.
7330 * @param first_data_sge Index of first data sge (used if perf hints are enabled)
7331 * @param relative_off Relative offset of the IO (if any).
7332 * @param xfer_len Data transfer length.
7333 * @param xri XRI for this exchange.
7334 * @param tag IO tag value.
7335 * @param cq_id The id of the completion queue where the WQE response is sent.
7336 * @param xid OX_ID for the exchange.
7337 * @param rpi remote node indicator (RPI)
7338 * @param rnode Destination request (i.e. remote node).
7339 * @param flags Optional attributes, including:
7340 * - ACTIVE - IO is already active.
7341 * - AUTO RSP - Automatically generate a good FCP_RSP.
7342 * @param dif T10 DIF operation, or 0 to disable.
7343 * @param bs T10 DIF block size, or 0 if DIF is disabled.
7344 * @param csctl value of csctl field.
7345 * @param app_id value for VM application header.
7346 *
7347 * @return Returns 0 on success, or a non-zero value on failure.
7348 */
7349 int32_t
7350 sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
7351 uint32_t relative_off, uint32_t xfer_len,
7352 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
7353 uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
7354 {
7355 sli4_fcp_tsend64_wqe_t *tsend = buf;
7356 sli4_fcp_128byte_wqe_t *tsend_128 = buf;
7357 sli4_sge_t *sge = NULL;
7358
7359 ocs_memset(buf, 0, size);
7360
7361 if (!sgl || !sgl->virt) {
7362 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7363 sgl, sgl ? sgl->virt : NULL);
7364 return -1;
7365 }
7366 sge = sgl->virt;
7367
7368 if (sli4->config.sgl_pre_registered) {
7369 tsend->xbl = FALSE;
7370
7371 tsend->dbde = TRUE;
7372 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7373
7374 		/* TSEND64_WQE specifies that the first two SGEs are skipped
7375 		 * (i.e. the 3rd is valid) */
7376 tsend->bde.buffer_length = sge[2].buffer_length;
7377 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7378 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7379 } else {
7380 tsend->xbl = TRUE;
7381
7382 /* if data is a single physical address, use a BDE */
7383 if (!dif && (xfer_len <= sge[2].buffer_length)) {
7384 tsend->dbde = TRUE;
7385 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7386 			/* TSEND64_WQE specifies that the first two SGEs are skipped
7387 			 * (i.e. the 3rd is valid) */
7388 tsend->bde.buffer_length = sge[2].buffer_length;
7389 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
7390 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
7391 } else {
7392 tsend->bde.bde_type = SLI4_BDE_TYPE_BLP;
7393 tsend->bde.buffer_length = sgl->size;
7394 tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7395 tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7396 }
7397 }
7398
7399 tsend->relative_offset = relative_off;
7400
7401 if (flags & SLI4_IO_CONTINUATION) {
7402 tsend->xc = TRUE;
7403 }
7404 tsend->xri_tag = xri;
7405
7406 tsend->rpi = rpi;
7407
7408 tsend->pu = TRUE; /* WQE uses relative offset */
7409
7410 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
7411 tsend->ar = TRUE;
7412 }
7413
7414 tsend->command = SLI4_WQE_FCP_TSEND64;
7415 tsend->class = SLI4_ELS_REQUEST64_CLASS_3;
7416 tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7417 tsend->dif = dif;
7418 tsend->bs = bs;
7419
7420 tsend->remote_xid = xid;
7421
7422 tsend->request_tag = tag;
7423
7424 tsend->len_loc = 0x2;
7425
7426 if (rnode->node_group) {
7427 tsend->hlm = TRUE;
7428 tsend->dword5 = rnode->fc_id & 0x00ffffff;
7429 }
7430
7431 tsend->cq_id = cq_id;
7432
7433 tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE;
7434
7435 tsend->fcp_data_transmit_length = xfer_len;
7436
7437 if (sli4->config.perf_hint) {
7438 tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7439 tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
7440 tsend->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
7441 tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
7442 }
7443
7444 	/* The upper 7 bits of csctl are the priority */
7445 if (csctl & SLI4_MASK_CCP) {
7446 tsend->ccpe = 1;
7447 tsend->ccp = (csctl & SLI4_MASK_CCP);
7448 }
7449
7450 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) {
7451 tsend->app_id_valid = 1;
7452 tsend->wqes = 1;
7453 tsend_128->dw[31] = app_id;
7454 }
7455 return 0;
7456 }
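
/*
 * Illustrative usage sketch (not part of the driver): building an FCP_TSEND64
 * WQE without T10 DIF.  All identifiers passed in are hypothetical values
 * owned by the caller; the example is compile-guarded.
 */
#if 0
static int32_t
example_build_tsend(sli4_t *sli4, uint8_t *wqe, ocs_dma_t *sgl, uint32_t xfer_len,
		    uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t ox_id,
		    uint32_t rpi, ocs_remote_node_t *rnode)
{
	/* first_data_sge (here hypothetically 2, the third SGE) is only
	 * consumed when perf hints are enabled */
	return sli_fcp_tsend64_wqe(sli4, wqe, sli4->config.wqe_size, sgl, 2,
				   0, xfer_len, xri, tag, cq_id, ox_id, rpi,
				   rnode, 0, 0, 0, 0, 0);
}
#endif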
7457
7458 /**
7459 * @ingroup sli_fc
7460 * @brief Write a GEN_REQUEST64 work queue entry.
7461 *
7462 * @note This WQE is only used to send FC-CT commands.
7463 *
7464 * @param sli4 SLI context.
7465 * @param buf Destination buffer for the WQE.
7466 * @param size Buffer size, in bytes.
7467 * @param sgl DMA memory for the request.
7468 * @param req_len Length of request.
7469 * @param max_rsp_len Max length of response.
7470 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7471 * @param xri XRI for this exchange.
7472 * @param tag IO tag value.
7473 * @param cq_id The id of the completion queue where the WQE response is sent.
7474 * @param rnode Destination of request (that is, the remote node).
7475 * @param r_ctl R_CTL value for sequence.
7476 * @param type TYPE value for sequence.
7477 * @param df_ctl DF_CTL value for sequence.
7478 *
7479 * @return Returns 0 on success, or a non-zero value on failure.
7480 */
7481 int32_t
7482 sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
7483 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
7484 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode,
7485 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7486 {
7487 sli4_gen_request64_wqe_t *gen = buf;
7488 sli4_sge_t *sge = NULL;
7489
7490 ocs_memset(buf, 0, size);
7491
7492 if (!sgl || !sgl->virt) {
7493 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7494 sgl, sgl ? sgl->virt : NULL);
7495 return -1;
7496 }
7497 sge = sgl->virt;
7498
7499 if (sli4->config.sgl_pre_registered) {
7500 gen->xbl = FALSE;
7501
7502 gen->dbde = TRUE;
7503 gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7504
7505 gen->bde.buffer_length = req_len;
7506 gen->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
7507 gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
7508 } else {
7509 gen->xbl = TRUE;
7510
7511 gen->bde.bde_type = SLI4_BDE_TYPE_BLP;
7512
7513 gen->bde.buffer_length = 2 * sizeof(sli4_sge_t);
7514 gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
7515 gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
7516 }
7517
7518 gen->request_payload_length = req_len;
7519 gen->max_response_payload_length = max_rsp_len;
7520
7521 gen->df_ctl = df_ctl;
7522 gen->type = type;
7523 gen->r_ctl = r_ctl;
7524
7525 gen->xri_tag = xri;
7526
7527 gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7528 gen->context_tag = rnode->indicator;
7529
7530 gen->class = SLI4_ELS_REQUEST64_CLASS_3;
7531
7532 gen->command = SLI4_WQE_GEN_REQUEST64;
7533
7534 gen->timer = timeout;
7535
7536 gen->request_tag = tag;
7537
7538 gen->iod = SLI4_ELS_REQUEST64_DIR_READ;
7539
7540 gen->qosd = TRUE;
7541
7542 if (rnode->node_group) {
7543 gen->hlm = TRUE;
7544 gen->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7545 }
7546
7547 gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE;
7548
7549 gen->cq_id = cq_id;
7550
7551 return 0;
7552 }
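
/*
 * Illustrative usage sketch (not part of the driver): building a GEN_REQUEST64
 * WQE for an FC-CT request.  R_CTL 0x02 (unsolicited control) and TYPE 0x20
 * (FC services) are the usual FC-CT codes; the timeout and all other values
 * are hypothetical caller-owned placeholders.  Compile-guarded example.
 */
#if 0
static int32_t
example_build_ct_request(sli4_t *sli4, uint8_t *wqe, ocs_dma_t *sgl,
			 uint32_t req_len, uint32_t rsp_len, uint16_t xri,
			 uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode)
{
	return sli_gen_request64_wqe(sli4, wqe, sli4->config.wqe_size, sgl,
				     req_len, rsp_len, 30, xri, tag, cq_id,
				     rnode, 0x02, 0x20, 0);
}
#endif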
7553
7554 /**
7555 * @ingroup sli_fc
7556 * @brief Write a SEND_FRAME work queue entry
7557 *
7558 * @param sli4 SLI context.
7559 * @param buf Destination buffer for the WQE.
7560 * @param size Buffer size, in bytes.
7561 * @param sof Start of frame value
7562 * @param eof End of frame value
7563 * @param hdr Pointer to FC header data
7564 * @param payload DMA memory for the payload.
7565 * @param req_len Length of payload.
7566 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7567 * @param xri XRI for this exchange.
7568 * @param req_tag IO tag value.
7569 *
7570 * @return Returns 0 on success, or a non-zero value on failure.
7571 */
7572 int32_t
7573 sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr,
7574 ocs_dma_t *payload, uint32_t req_len, uint8_t timeout,
7575 uint16_t xri, uint16_t req_tag)
7576 {
7577 sli4_send_frame_wqe_t *sf = buf;
7578
7579 ocs_memset(buf, 0, size);
7580
7581 sf->dbde = TRUE;
7582 sf->bde.buffer_length = req_len;
7583 sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7584 sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7585
7586 /* Copy FC header */
7587 sf->fc_header_0_1[0] = hdr[0];
7588 sf->fc_header_0_1[1] = hdr[1];
7589 sf->fc_header_2_5[0] = hdr[2];
7590 sf->fc_header_2_5[1] = hdr[3];
7591 sf->fc_header_2_5[2] = hdr[4];
7592 sf->fc_header_2_5[3] = hdr[5];
7593
7594 sf->frame_length = req_len;
7595
7596 sf->xri_tag = xri;
7597 sf->pu = 0;
7598 sf->context_tag = 0;
7599
7600 sf->ct = 0;
7601 sf->command = SLI4_WQE_SEND_FRAME;
7602 sf->class = SLI4_ELS_REQUEST64_CLASS_3;
7603 sf->timer = timeout;
7604
7605 sf->request_tag = req_tag;
7606 sf->eof = eof;
7607 sf->sof = sof;
7608
7609 sf->qosd = 0;
7610 sf->lenloc = 1;
7611 sf->xc = 0;
7612
7613 sf->xbl = 1;
7614
7615 sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE;
7616 sf->cq_id = 0xffff;
7617
7618 return 0;
7619 }
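
/*
 * Illustrative usage sketch (not part of the driver): sending a raw frame with
 * SEND_FRAME.  The SOF/EOF delimiter codes, 24-byte FC header, and payload are
 * all supplied by a hypothetical caller; compile-guarded example.
 */
#if 0
static int32_t
example_send_frame(sli4_t *sli4, uint8_t *wqe, uint8_t sof, uint8_t eof,
		   uint32_t *fc_hdr, ocs_dma_t *payload, uint32_t len,
		   uint16_t xri, uint16_t tag)
{
	/* 10 second timeout; sof/eof are caller-chosen frame delimiter codes */
	return sli_send_frame_wqe(sli4, wqe, sli4->config.wqe_size, sof, eof,
				  fc_hdr, payload, len, 10, xri, tag);
}
#endif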
7620
7621 /**
7622 * @ingroup sli_fc
7623 * @brief Write a XMIT_SEQUENCE64 work queue entry.
7624 *
7625 * This WQE is used to send FC-CT response frames.
7626 *
7627  * @note This API implements a restricted use of this WQE; remaining work (TODO)
7628  * includes passing in sequence initiative and support for full SGLs.
7629 *
7630 * @param sli4 SLI context.
7631 * @param buf Destination buffer for the WQE.
7632 * @param size Buffer size, in bytes.
7633 * @param payload DMA memory for the request.
7634 * @param payload_len Length of request.
7635 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
7636 * @param ox_id originator exchange ID
7637 * @param xri XRI for this exchange.
7638 * @param tag IO tag value.
7639 * @param rnode Destination of request (that is, the remote node).
7640 * @param r_ctl R_CTL value for sequence.
7641 * @param type TYPE value for sequence.
7642 * @param df_ctl DF_CTL value for sequence.
7643 *
7644 * @return Returns 0 on success, or a non-zero value on failure.
7645 */
7646 int32_t
7647 sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
7648 uint32_t payload_len, uint8_t timeout, uint16_t ox_id,
7649 uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode,
7650 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7651 {
7652 sli4_xmit_sequence64_wqe_t *xmit = buf;
7653
7654 ocs_memset(buf, 0, size);
7655
7656 if ((payload == NULL) || (payload->virt == NULL)) {
7657 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
7658 payload, payload ? payload->virt : NULL);
7659 return -1;
7660 }
7661
7662 if (sli4->config.sgl_pre_registered) {
7663 xmit->dbde = TRUE;
7664 } else {
7665 xmit->xbl = TRUE;
7666 }
7667
7668 xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
7669 xmit->bde.buffer_length = payload_len;
7670 xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7671 xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7672 xmit->sequence_payload_len = payload_len;
7673
7674 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7675
7676 xmit->relative_offset = 0;
7677
7678 xmit->si = 0; /* sequence initiative - this matches what is seen from
7679 * FC switches in response to FCGS commands */
7680 xmit->ft = 0; /* force transmit */
7681 xmit->xo = 0; /* exchange responder */
7682 	xmit->ls = 1;	/* last in sequence */
7683 xmit->df_ctl = df_ctl;
7684 xmit->type = type;
7685 xmit->r_ctl = r_ctl;
7686
7687 xmit->xri_tag = xri;
7688 xmit->context_tag = rnode->indicator;
7689
7690 xmit->dif = 0;
7691 xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7692 xmit->bs = 0;
7693
7694 xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
7695 xmit->class = SLI4_ELS_REQUEST64_CLASS_3;
7696 xmit->pu = 0;
7697 xmit->timer = timeout;
7698
7699 xmit->abort_tag = 0;
7700 xmit->request_tag = tag;
7701 xmit->remote_xid = ox_id;
7702
7703 xmit->iod = SLI4_ELS_REQUEST64_DIR_READ;
7704
7705 if (rnode->node_group) {
7706 xmit->hlm = TRUE;
7707 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
7708 }
7709
7710 xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE;
7711
7712 xmit->len_loc = 2;
7713
7714 xmit->cq_id = 0xFFFF;
7715
7716 return 0;
7717 }
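
/*
 * Illustrative usage sketch (not part of the driver): sending an FC-CT
 * response sequence with XMIT_SEQUENCE64.  R_CTL 0x03 (solicited control) and
 * TYPE 0x20 (FC services) are the usual CT response codes; the timeout and
 * other values are hypothetical caller-owned placeholders.  Compile-guarded.
 */
#if 0
static int32_t
example_send_ct_response(sli4_t *sli4, uint8_t *wqe, ocs_dma_t *payload,
			 uint32_t len, uint16_t ox_id, uint16_t xri,
			 uint16_t tag, ocs_remote_node_t *rnode)
{
	return sli_xmit_sequence64_wqe(sli4, wqe, sli4->config.wqe_size,
				       payload, len, 30, ox_id, xri, tag,
				       rnode, 0x03, 0x20, 0);
}
#endif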
7718
7719 /**
7720 * @ingroup sli_fc
7721 * @brief Write a REQUEUE_XRI_WQE work queue entry.
7722 *
7723 * @param sli4 SLI context.
7724 * @param buf Destination buffer for the WQE.
7725 * @param size Buffer size, in bytes.
7726 * @param xri XRI for this exchange.
7727 * @param tag IO tag value.
7728 * @param cq_id The id of the completion queue where the WQE response is sent.
7729 *
7730 * @return Returns 0 on success, or a non-zero value on failure.
7731 */
7732 int32_t
7733 sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id)
7734 {
7735 sli4_requeue_xri_wqe_t *requeue = buf;
7736
7737 ocs_memset(buf, 0, size);
7738
7739 requeue->command = SLI4_WQE_REQUEUE_XRI;
7740 requeue->xri_tag = xri;
7741 requeue->request_tag = tag;
7742 requeue->xc = 1;
7743 requeue->qosd = 1;
7744 requeue->cq_id = cq_id;
7745 requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE;
7746 return 0;
7747 }
7748
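/**
 * @ingroup sli_fc
 * @brief Write an XMIT_BCAST64 work queue entry.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the WQE.
 * @param size Buffer size, in bytes.
 * @param payload DMA memory for the sequence payload.
 * @param payload_len Length of the payload, in bytes.
 * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
 * @param xri XRI for this exchange.
 * @param tag IO tag value.
 * @param cq_id The id of the completion queue where the WQE response is sent.
 * @param rnode Destination of request (that is, the remote node); must be an
 * unattached node whose indicator is used as the temporary RPI.
 * @param r_ctl R_CTL value for sequence.
 * @param type TYPE value for sequence.
 * @param df_ctl DF_CTL value for sequence.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */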
7749 int32_t
7750 sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
7751 uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag,
7752 uint16_t cq_id, ocs_remote_node_t *rnode,
7753 uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
7754 {
7755 sli4_xmit_bcast64_wqe_t *bcast = buf;
7756
7757 /* Command requires a temporary RPI (i.e. unused remote node) */
7758 if (rnode->attached) {
7759 ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator);
7760 return -1;
7761 }
7762
7763 ocs_memset(buf, 0, size);
7764
7765 bcast->dbde = TRUE;
7766 bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
7767 bcast->sequence_payload.buffer_length = payload_len;
7768 bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
7769 bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
7770
7771 bcast->sequence_payload_length = payload_len;
7772
7773 bcast->df_ctl = df_ctl;
7774 bcast->type = type;
7775 bcast->r_ctl = r_ctl;
7776
7777 bcast->xri_tag = xri;
7778
7779 bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7780 bcast->context_tag = rnode->sport->indicator;
7781
7782 bcast->class = SLI4_ELS_REQUEST64_CLASS_3;
7783
7784 bcast->command = SLI4_WQE_XMIT_BCAST64;
7785
7786 bcast->timer = timeout;
7787
7788 bcast->request_tag = tag;
7789
7790 bcast->temporary_rpi = rnode->indicator;
7791
7792 bcast->len_loc = 0x1;
7793
7794 bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
7795
7796 bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE;
7797
7798 bcast->cq_id = cq_id;
7799
7800 return 0;
7801 }
7802
7803 /**
7804 * @ingroup sli_fc
7805 * @brief Write an XMIT_BLS_RSP64_WQE work queue entry.
7806 *
7807 * @param sli4 SLI context.
7808 * @param buf Destination buffer for the WQE.
7809 * @param size Buffer size, in bytes.
7810 * @param payload Contents of the BLS payload to be sent.
7811 * @param xri XRI for this exchange.
7812 * @param tag IO tag value.
7813 * @param cq_id The id of the completion queue where the WQE response is sent.
7814 * @param rnode Destination of request (that is, the remote node).
7815 * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID.
7816 *
7817 * @return Returns 0 on success, or a non-zero value on failure.
7818 */
7819 int32_t
7820 sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload,
7821 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint32_t s_id)
7822 {
7823 sli4_xmit_bls_rsp_wqe_t *bls = buf;
7824
7825 /*
7826 * Callers can either specify RPI or S_ID, but not both
7827 */
7828 if (rnode->attached && (s_id != UINT32_MAX)) {
7829 ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n",
7830 rnode->indicator);
7831 return -1;
7832 }
7833
7834 ocs_memset(buf, 0, size);
7835
7836 if (SLI_BLS_ACC == payload->type) {
7837 bls->payload_word0 = (payload->u.acc.seq_id_last << 16) |
7838 (payload->u.acc.seq_id_validity << 24);
7839 bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
7840 bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
7841 } else if (SLI_BLS_RJT == payload->type) {
7842 bls->payload_word0 = *((uint32_t *)&payload->u.rjt);
7843 bls->ar = TRUE;
7844 } else {
7845 ocs_log_test(sli4->os, "bad BLS type %#x\n",
7846 payload->type);
7847 return -1;
7848 }
7849
7850 bls->ox_id = payload->ox_id;
7851 bls->rx_id = payload->rx_id;
7852
7853 if (rnode->attached) {
7854 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7855 bls->context_tag = rnode->indicator;
7856 } else {
7857 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7858 bls->context_tag = rnode->sport->indicator;
7859
7860 if (UINT32_MAX != s_id) {
7861 bls->local_n_port_id = s_id & 0x00ffffff;
7862 } else {
7863 bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff;
7864 }
7865 bls->remote_id = rnode->fc_id & 0x00ffffff;
7866
7867 bls->temporary_rpi = rnode->indicator;
7868 }
7869
7870 bls->xri_tag = xri;
7871
7872 bls->class = SLI4_ELS_REQUEST64_CLASS_3;
7873
7874 bls->command = SLI4_WQE_XMIT_BLS_RSP;
7875
7876 bls->request_tag = tag;
7877
7878 bls->qosd = TRUE;
7879
7880 if (rnode->node_group) {
7881 bls->hlm = TRUE;
7882 bls->remote_id = rnode->fc_id & 0x00ffffff;
7883 }
7884
7885 bls->cq_id = cq_id;
7886
7887 bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE;
7888
7889 return 0;
7890 }
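
/*
 * Illustrative usage sketch (not part of the driver): sending a BLS accept
 * (BA_ACC) in response to an ABTS.  The sli_bls_payload_t contents and the
 * tag/queue values are hypothetical caller-owned data; compile-guarded.
 */
#if 0
static int32_t
example_send_ba_acc(sli4_t *sli4, uint8_t *wqe, sli_bls_payload_t *bls,
		    uint16_t xri, uint16_t tag, uint16_t cq_id,
		    ocs_remote_node_t *rnode)
{
	bls->type = SLI_BLS_ACC;
	/* UINT32_MAX: let the SLI port supply the S_ID */
	return sli_xmit_bls_rsp64_wqe(sli4, wqe, sli4->config.wqe_size, bls,
				      xri, tag, cq_id, rnode, UINT32_MAX);
}
#endif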
7891
7892 /**
7893 * @ingroup sli_fc
7894 * @brief Write a XMIT_ELS_RSP64_WQE work queue entry.
7895 *
7896 * @param sli4 SLI context.
7897 * @param buf Destination buffer for the WQE.
7898 * @param size Buffer size, in bytes.
7899 * @param rsp DMA memory for the ELS response.
7900 * @param rsp_len Length of ELS response, in bytes.
7901 * @param xri XRI for this exchange.
7902 * @param tag IO tag value.
7903 * @param cq_id The id of the completion queue where the WQE response is sent.
7904 * @param ox_id OX_ID of the exchange containing the request.
7905 * @param rnode Destination of the ELS response (that is, the remote node).
7906 * @param flags Optional attributes, including:
7907 * - SLI4_IO_CONTINUATION - IO is already active.
7908 * @param s_id S_ID used for special responses.
7909 *
7910 * @return Returns 0 on success, or a non-zero value on failure.
7911 */
7912 int32_t
7913 sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp,
7914 uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
7915 uint16_t ox_id, ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id)
7916 {
7917 sli4_xmit_els_rsp64_wqe_t *els = buf;
7918
7919 ocs_memset(buf, 0, size);
7920
7921 if (sli4->config.sgl_pre_registered) {
7922 els->dbde = TRUE;
7923 } else {
7924 els->xbl = TRUE;
7925 }
7926
7927 els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
7928 els->els_response_payload.buffer_length = rsp_len;
7929 els->els_response_payload.u.data.buffer_address_low = ocs_addr32_lo(rsp->phys);
7930 els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys);
7931
7932 els->els_response_payload_length = rsp_len;
7933
7934 els->xri_tag = xri;
7935
7936 els->class = SLI4_ELS_REQUEST64_CLASS_3;
7937
7938 els->command = SLI4_WQE_ELS_RSP64;
7939
7940 els->request_tag = tag;
7941
7942 els->ox_id = ox_id;
7943
7944 els->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
7945
7946 els->qosd = TRUE;
7947
7948 if (flags & SLI4_IO_CONTINUATION) {
7949 els->xc = TRUE;
7950 }
7951
7952 if (rnode->attached) {
7953 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
7954 els->context_tag = rnode->indicator;
7955 } else {
7956 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
7957 els->context_tag = rnode->sport->indicator;
7958 els->remote_id = rnode->fc_id & 0x00ffffff;
7959 els->temporary_rpi = rnode->indicator;
7960 if (UINT32_MAX != s_id) {
7961 els->sp = TRUE;
7962 els->s_id = s_id & 0x00ffffff;
7963 }
7964 }
7965
7966 if (rnode->node_group) {
7967 els->hlm = TRUE;
7968 els->remote_id = rnode->fc_id & 0x00ffffff;
7969 }
7970
7971 els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN;
7972
7973 els->cq_id = cq_id;
7974
7975 return 0;
7976 }
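
/*
 * Illustrative usage sketch (not part of the driver): sending an ELS response
 * (for example an LS_ACC already formatted in the rsp DMA buffer).  All values
 * are hypothetical caller-owned placeholders; compile-guarded example.
 */
#if 0
static int32_t
example_send_els_rsp(sli4_t *sli4, uint8_t *wqe, ocs_dma_t *rsp, uint32_t rsp_len,
		     uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t ox_id,
		     ocs_remote_node_t *rnode)
{
	/* UINT32_MAX: no special S_ID override */
	return sli_xmit_els_rsp64_wqe(sli4, wqe, sli4->config.wqe_size, rsp,
				      rsp_len, xri, tag, cq_id, ox_id, rnode,
				      0, UINT32_MAX);
}
#endif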
7977
7978 /**
7979 * @ingroup sli_fc
7980 * @brief Process an asynchronous Link State event entry.
7981 *
7982 * @par Description
7983 * Parses Asynchronous Completion Queue Entry (ACQE),
7984 * creates an abstracted event, and calls registered callback functions.
7985 *
7986 * @param sli4 SLI context.
7987 * @param acqe Pointer to the ACQE.
7988 *
7989 * @return Returns 0 on success, or a non-zero value on failure.
7990 */
7991 int32_t
7992 sli_fc_process_link_state(sli4_t *sli4, void *acqe)
7993 {
7994 sli4_link_state_t *link_state = acqe;
7995 sli4_link_event_t event = { 0 };
7996 int32_t rc = 0;
7997
7998 if (!sli4->link) {
7999 /* bail if there is no callback */
8000 return 0;
8001 }
8002
8003 if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) {
8004 event.topology = SLI_LINK_TOPO_NPORT;
8005 event.medium = SLI_LINK_MEDIUM_ETHERNET;
8006 } else {
8007 /* TODO is this supported for anything other than FCoE? */
8008 ocs_log_test(sli4->os, "unsupported link type %#x\n",
8009 link_state->link_type);
8010 event.topology = SLI_LINK_TOPO_MAX;
8011 event.medium = SLI_LINK_MEDIUM_MAX;
8012 rc = -1;
8013 }
8014
8015 switch (link_state->port_link_status) {
8016 case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN:
8017 case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN:
8018 event.status = SLI_LINK_STATUS_DOWN;
8019 break;
8020 case SLI4_PORT_LINK_STATUS_PHYSICAL_UP:
8021 case SLI4_PORT_LINK_STATUS_LOGICAL_UP:
8022 event.status = SLI_LINK_STATUS_UP;
8023 break;
8024 default:
8025 ocs_log_test(sli4->os, "unsupported link status %#x\n",
8026 link_state->port_link_status);
8027 event.status = SLI_LINK_STATUS_MAX;
8028 rc = -1;
8029 }
8030
8031 switch (link_state->port_speed) {
8032 case 0:
8033 event.speed = 0;
8034 break;
8035 case 1:
8036 event.speed = 10;
8037 break;
8038 case 2:
8039 event.speed = 100;
8040 break;
8041 case 3:
8042 event.speed = 1000;
8043 break;
8044 case 4:
8045 event.speed = 10000;
8046 break;
8047 case 5:
8048 event.speed = 20000;
8049 break;
8050 case 6:
8051 event.speed = 25000;
8052 break;
8053 case 7:
8054 event.speed = 40000;
8055 break;
8056 case 8:
8057 event.speed = 100000;
8058 break;
8059 default:
8060 ocs_log_test(sli4->os, "unsupported port_speed %#x\n",
8061 link_state->port_speed);
8062 rc = -1;
8063 }
8064
8065 sli4->link(sli4->link_arg, (void *)&event);
8066
8067 return rc;
8068 }
8069
8070 /**
8071 * @ingroup sli_fc
8072 * @brief Process an asynchronous Link Attention event entry.
8073 *
8074 * @par Description
8075 * Parses Asynchronous Completion Queue Entry (ACQE),
8076 * creates an abstracted event, and calls the registered callback functions.
8077 *
8078 * @param sli4 SLI context.
8079 * @param acqe Pointer to the ACQE.
8080 *
8081 * @todo XXX all events return LINK_UP.
8082 *
8083 * @return Returns 0 on success, or a non-zero value on failure.
8084 */
8085 int32_t
8086 sli_fc_process_link_attention(sli4_t *sli4, void *acqe)
8087 {
8088 sli4_link_attention_t *link_attn = acqe;
8089 sli4_link_event_t event = { 0 };
8090
8091 ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x "
8092 "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x "
8093 "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type,
8094 link_attn->topology, link_attn->port_speed, link_attn->port_fault,
8095 link_attn->shared_link_status, link_attn->logical_link_speed,
8096 link_attn->event_tag);
8097
8098 if (!sli4->link) {
8099 return 0;
8100 }
8101
8102 event.medium = SLI_LINK_MEDIUM_FC;
8103
8104 switch (link_attn->attn_type) {
8105 case SLI4_LINK_ATTN_TYPE_LINK_UP:
8106 event.status = SLI_LINK_STATUS_UP;
8107 break;
8108 case SLI4_LINK_ATTN_TYPE_LINK_DOWN:
8109 event.status = SLI_LINK_STATUS_DOWN;
8110 break;
8111 case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA:
8112 ocs_log_debug(sli4->os, "attn_type: no hard alpa\n");
8113 event.status = SLI_LINK_STATUS_NO_ALPA;
8114 break;
8115 default:
8116 ocs_log_test(sli4->os, "attn_type: unknown\n");
8117 break;
8118 }
8119
8120 switch (link_attn->event_type) {
8121 case SLI4_FC_EVENT_LINK_ATTENTION:
8122 break;
8123 case SLI4_FC_EVENT_SHARED_LINK_ATTENTION:
8124 ocs_log_debug(sli4->os, "event_type: FC shared link event \n");
8125 break;
8126 default:
8127 ocs_log_test(sli4->os, "event_type: unknown\n");
8128 break;
8129 }
8130
8131 switch (link_attn->topology) {
8132 case SLI4_LINK_ATTN_P2P:
8133 event.topology = SLI_LINK_TOPO_NPORT;
8134 break;
8135 case SLI4_LINK_ATTN_FC_AL:
8136 event.topology = SLI_LINK_TOPO_LOOP;
8137 break;
8138 case SLI4_LINK_ATTN_INTERNAL_LOOPBACK:
8139 ocs_log_debug(sli4->os, "topology Internal loopback\n");
8140 event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL;
8141 break;
8142 case SLI4_LINK_ATTN_SERDES_LOOPBACK:
8143 ocs_log_debug(sli4->os, "topology serdes loopback\n");
8144 event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL;
8145 break;
8146 default:
8147 ocs_log_test(sli4->os, "topology: unknown\n");
8148 break;
8149 }
8150
8151 event.speed = link_attn->port_speed * 1000;
8152
8153 sli4->link(sli4->link_arg, (void *)&event);
8154
8155 return 0;
8156 }
8157
8158 /**
8159 * @ingroup sli_fc
8160 * @brief Parse an FC/FCoE work queue CQ entry.
8161 *
8162 * @param sli4 SLI context.
8163 * @param cq CQ to process.
8164 * @param cqe Pointer to the CQ entry.
8165 * @param etype CQ event type.
8166 * @param r_id Resource ID associated with this completion message (such as the IO tag).
8167 *
8168 * @return Returns 0 on success, or a non-zero value on failure.
8169 */
8170 int32_t
8171 sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
8172 uint16_t *r_id)
8173 {
8174 uint8_t code = cqe[SLI4_CQE_CODE_OFFSET];
8175 int32_t rc = -1;
8176
8177 switch (code) {
8178 case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
8179 {
8180 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8181
8182 *etype = SLI_QENTRY_WQ;
8183 *r_id = wcqe->request_tag;
8184 rc = wcqe->status;
8185
8186 /* Flag errors except for FCP_RSP_FAILURE */
8187 if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) {
8188 ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n",
8189 wcqe->status, wcqe->hw_status,
8190 wcqe->request_tag, wcqe->wqe_specific_1,
8191 wcqe->wqe_specific_2, wcqe->xb);
8192 ocs_log_test(sli4->os, " %08X %08X %08X %08X\n", ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1],
8193 ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]);
8194 }
8195
8196 /* TODO: need to pass additional status back out of here as well
8197 * as status (could overload rc as status/addlstatus are only 8 bits each)
8198 */
8199
8200 break;
8201 }
8202 case SLI4_CQE_CODE_RQ_ASYNC:
8203 {
8204 sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
8205
8206 *etype = SLI_QENTRY_RQ;
8207 *r_id = rcqe->rq_id;
8208 rc = rcqe->status;
8209 break;
8210 }
8211 case SLI4_CQE_CODE_RQ_ASYNC_V1:
8212 {
8213 sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe;
8214
8215 *etype = SLI_QENTRY_RQ;
8216 *r_id = rcqe->rq_id;
8217 rc = rcqe->status;
8218 break;
8219 }
8220 case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
8221 {
8222 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
8223
8224 *etype = SLI_QENTRY_OPT_WRITE_CMD;
8225 *r_id = optcqe->rq_id;
8226 rc = optcqe->status;
8227 break;
8228 }
8229 case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
8230 {
8231 sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe;
8232
8233 *etype = SLI_QENTRY_OPT_WRITE_DATA;
8234 *r_id = dcqe->xri;
8235 rc = dcqe->status;
8236
8237 /* Flag errors */
8238 if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
8239 ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
8240 dcqe->status, dcqe->hw_status,
8241 dcqe->xri, dcqe->total_data_placed,
8242 ((uint32_t*) cqe)[3], dcqe->xb);
8243 }
8244 break;
8245 }
8246 case SLI4_CQE_CODE_RQ_COALESCING:
8247 {
8248 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
8249
8250 *etype = SLI_QENTRY_RQ;
8251 *r_id = rcqe->rq_id;
8252 rc = rcqe->status;
8253 break;
8254 }
8255 case SLI4_CQE_CODE_XRI_ABORTED:
8256 {
8257 sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe;
8258
8259 *etype = SLI_QENTRY_XABT;
8260 *r_id = xa->xri;
8261 rc = 0;
8262 break;
8263 }
8264 case SLI4_CQE_CODE_RELEASE_WQE: {
8265 sli4_fc_wqec_t *wqec = (void*) cqe;
8266
8267 *etype = SLI_QENTRY_WQ_RELEASE;
8268 *r_id = wqec->wq_id;
8269 rc = 0;
8270 break;
8271 }
8272 default:
8273 ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code);
8274 *etype = SLI_QENTRY_MAX;
8275 *r_id = UINT16_MAX;
8276 }
8277
8278 return rc;
8279 }
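
/*
 * Illustrative usage sketch (not part of the driver): a caller dispatching on
 * the event type returned by sli_fc_cqe_parse().  The hand-off targets are
 * hypothetical; compile-guarded example.
 */
#if 0
static void
example_dispatch_cqe(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe)
{
	sli4_qentry_e etype = SLI_QENTRY_MAX;
	uint16_t r_id = UINT16_MAX;
	int32_t status;

	status = sli_fc_cqe_parse(sli4, cq, cqe, &etype, &r_id);

	switch (etype) {
	case SLI_QENTRY_WQ:
		/* r_id is the WQE request tag; status is the WCQE status */
		break;
	case SLI_QENTRY_RQ:
		/* r_id is the RQ ID; sli_fc_rqe_rqid_and_index() gives the index */
		break;
	case SLI_QENTRY_XABT:
		/* r_id is the aborted XRI */
		break;
	default:
		ocs_log_debug(sli4->os, "etype=%d r_id=%#x status=%d\n",
			      etype, r_id, status);
		break;
	}
}
#endif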
8280
8281 /**
8282 * @ingroup sli_fc
8283 * @brief Return the ELS/CT response length.
8284 *
8285 * @param sli4 SLI context.
8286 * @param cqe Pointer to the CQ entry.
8287 *
8288 * @return Returns the length, in bytes.
8289 */
8290 uint32_t
8291 sli_fc_response_length(sli4_t *sli4, uint8_t *cqe)
8292 {
8293 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8294
8295 return wcqe->wqe_specific_1;
8296 }
8297
8298 /**
8299 * @ingroup sli_fc
8300 * @brief Return the FCP IO length.
8301 *
8302 * @param sli4 SLI context.
8303 * @param cqe Pointer to the CQ entry.
8304 *
8305 * @return Returns the length, in bytes.
8306 */
8307 uint32_t
8308 sli_fc_io_length(sli4_t *sli4, uint8_t *cqe)
8309 {
8310 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8311
8312 return wcqe->wqe_specific_1;
8313 }
8314
8315 /**
8316 * @ingroup sli_fc
8317 * @brief Retrieve the D_ID from the completion.
8318 *
8319 * @param sli4 SLI context.
8320 * @param cqe Pointer to the CQ entry.
8321 * @param d_id Pointer where the D_ID is written.
8322 *
8323 * @return Returns 0 on success, or a non-zero value on failure.
8324 */
8325 int32_t
8326 sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id)
8327 {
8328 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8329
8330 *d_id = 0;
8331
8332 if (wcqe->status) {
8333 return -1;
8334 } else {
8335 *d_id = wcqe->wqe_specific_2 & 0x00ffffff;
8336 return 0;
8337 }
8338 }
8339
8340 uint32_t
8341 sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe)
8342 {
8343 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8344 uint32_t mask;
8345
8346 switch (wcqe->status) {
8347 case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
8348 mask = UINT32_MAX;
8349 break;
8350 case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
8351 case SLI4_FC_WCQE_STATUS_CMD_REJECT:
8352 mask = 0xff;
8353 break;
8354 case SLI4_FC_WCQE_STATUS_NPORT_RJT:
8355 case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
8356 case SLI4_FC_WCQE_STATUS_NPORT_BSY:
8357 case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
8358 case SLI4_FC_WCQE_STATUS_LS_RJT:
8359 mask = UINT32_MAX;
8360 break;
8361 case SLI4_FC_WCQE_STATUS_DI_ERROR:
8362 mask = UINT32_MAX;
8363 break;
8364 default:
8365 mask = 0;
8366 }
8367
8368 return wcqe->wqe_specific_2 & mask;
8369 }
8370
8371 /**
8372 * @ingroup sli_fc
8373 * @brief Retrieve the RQ index from the completion.
8374 *
8375 * @param sli4 SLI context.
8376 * @param cqe Pointer to the CQ entry.
8377 * @param rq_id Pointer where the rq_id is written.
8378 * @param index Pointer where the index is written.
8379 *
8380 * @return Returns 0 on success, or a non-zero value on failure.
8381 */
8382 int32_t
8383 sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index)
8384 {
8385 sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
8386 sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe;
8387 int32_t rc = -1;
8388 uint8_t code = 0;
8389
8390 *rq_id = 0;
8391 *index = UINT32_MAX;
8392
8393 code = cqe[SLI4_CQE_CODE_OFFSET];
8394
8395 if (code == SLI4_CQE_CODE_RQ_ASYNC) {
8396 *rq_id = rcqe->rq_id;
8397 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
8398 *index = rcqe->rq_element_index;
8399 rc = 0;
8400 } else {
8401 *index = rcqe->rq_element_index;
8402 rc = rcqe->status;
8403 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8404 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
8405 rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte,
8406 rcqe->eof_byte, rcqe->header_data_placement_length);
8407 }
8408 } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
8409 *rq_id = rcqe_v1->rq_id;
8410 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) {
8411 *index = rcqe_v1->rq_element_index;
8412 rc = 0;
8413 } else {
8414 *index = rcqe_v1->rq_element_index;
8415 rc = rcqe_v1->status;
8416 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8417 rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status),
8418 rcqe_v1->rq_id, rcqe_v1->rq_element_index,
8419 rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte,
8420 rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length);
8421 }
8422 } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
8423 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
8424
8425 *rq_id = optcqe->rq_id;
8426 if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) {
8427 *index = optcqe->rq_element_index;
8428 rc = 0;
8429 } else {
8430 *index = optcqe->rq_element_index;
8431 rc = optcqe->status;
8432 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n",
8433 optcqe->status, sli_fc_get_status_string(optcqe->status), optcqe->rq_id,
8434 optcqe->rq_element_index, optcqe->payload_data_placement_length,
8435 optcqe->header_data_placement_length, optcqe->oox, optcqe->agxr, optcqe->xri,
8436 optcqe->rpi);
8437 }
8438 } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
8439 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
8440
8441 *rq_id = rcqe->rq_id;
8442 if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) {
8443 *index = rcqe->rq_element_index;
8444 rc = 0;
8445 } else {
8446 *index = UINT32_MAX;
8447 rc = rcqe->status;
8448
8449 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n",
8450 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
8451 rcqe->rq_element_index, rcqe->rq_id, rcqe->sequence_reporting_placement_length);
8452 }
8453 } else {
8454 *index = UINT32_MAX;
8455
8456 rc = rcqe->status;
8457
8458 ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
8459 rcqe->status, rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length,
8460 rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length);
8461 }
8462
8463 return rc;
8464 }
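
/*
 * Illustrative usage sketch (not part of the driver): recovering the RQ ID and
 * element index from an RQ completion before consuming the posted buffer.
 * Compile-guarded example; the consume step is left to the caller.
 */
#if 0
static int32_t
example_handle_rq_cqe(sli4_t *sli4, uint8_t *cqe)
{
	uint16_t rq_id;
	uint32_t index;
	int32_t rc;

	rc = sli_fc_rqe_rqid_and_index(sli4, cqe, &rq_id, &index);
	if (rc) {
		/* non-zero: RQ error status (e.g. insufficient buffers) */
		return rc;
	}

	/* rq_id/index identify which posted buffer holds the received frame */
	return 0;
}
#endif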
8465
8466 /**
8467 * @ingroup sli_fc
8468 * @brief Process an asynchronous FCoE event entry.
8469 *
8470 * @par Description
8471 * Parses Asynchronous Completion Queue Entry (ACQE),
8472 * creates an abstracted event, and calls the registered callback functions.
8473 *
8474 * @param sli4 SLI context.
8475 * @param acqe Pointer to the ACQE.
8476 *
8477 * @return Returns 0 on success, or a non-zero value on failure.
8478 */
8479 int32_t
8480 sli_fc_process_fcoe(sli4_t *sli4, void *acqe)
8481 {
8482 sli4_fcoe_fip_t *fcoe = acqe;
8483 sli4_fip_event_t event = { 0 };
8484 uint32_t mask = UINT32_MAX;
8485
8486 ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n",
8487 fcoe->event_type,
8488 fcoe->fcf_count,
8489 fcoe->event_tag);
8490
8491 if (!sli4->fip) {
8492 return 0;
8493 }
8494
8495 event.type = fcoe->event_type;
8496 event.index = UINT32_MAX;
8497
8498 switch (fcoe->event_type) {
8499 case SLI4_FCOE_FIP_FCF_DISCOVERED:
8500 ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information);
8501 break;
8502 case SLI4_FCOE_FIP_FCF_TABLE_FULL:
8503 ocs_log_debug(sli4->os, "FCF Table Full\n");
8504 mask = 0;
8505 break;
8506 case SLI4_FCOE_FIP_FCF_DEAD:
8507 ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information);
8508 break;
8509 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
8510 mask = UINT16_MAX;
8511 ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask);
8512 break;
8513 case SLI4_FCOE_FIP_FCF_MODIFIED:
8514 ocs_log_debug(sli4->os, "FCF Modified\n");
8515 break;
8516 default:
8517 		ocs_log_test(sli4->os, "bad FCoE type %#x\n", fcoe->event_type);
8518 mask = 0;
8519 }
8520
8521 if (mask != 0) {
8522 event.index = fcoe->event_information & mask;
8523 }
8524
8525 sli4->fip(sli4->fip_arg, &event);
8526
8527 return 0;
8528 }
8529
8530 /**
8531 * @ingroup sli_fc
8532 * @brief Allocate a receive queue.
8533 *
8534 * @par Description
8535 * Allocates DMA memory and configures the requested queue type.
8536 *
8537 * @param sli4 SLI context.
8538 * @param q Pointer to the queue object for the header.
8539 * @param n_entries Number of entries to allocate.
8540 * @param buffer_size buffer size for the queue.
8541 * @param cq Associated CQ.
8542 * @param ulp The ULP to bind
8543 * @param is_hdr Used to validate the rq_id and set the type of queue
8544 *
8545 * @return Returns 0 on success, or -1 on failure.
8546 */
8547 int32_t
8548 sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q,
8549 uint32_t n_entries, uint32_t buffer_size,
8550 sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr)
8551 {
8552 int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
8553
8554 if ((sli4 == NULL) || (q == NULL)) {
8555 void *os = sli4 != NULL ? sli4->os : NULL;
8556
8557 ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q);
8558 return -1;
8559 }
8560
8561 if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
8562 n_entries, SLI_PAGE_SIZE)) {
8563 return -1;
8564 }
8565
8566 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
8567 rq_create = sli_cmd_fcoe_rq_create;
8568 } else {
8569 rq_create = sli_cmd_fcoe_rq_create_v1;
8570 }
8571
8572 if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma,
8573 cq->id, ulp, buffer_size)) {
8574 if (__sli_create_queue(sli4, q)) {
8575 ocs_dma_free(sli4->os, &q->dma);
8576 return -1;
8577 }
8578 		if (is_hdr && (q->id & 1)) {
8579 ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id);
8580 ocs_dma_free(sli4->os, &q->dma);
8581 return -1;
8582 } else if (!is_hdr && (q->id & 1) == 0) {
8583 ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id);
8584 ocs_dma_free(sli4->os, &q->dma);
8585 return -1;
8586 }
8587 } else {
8588 return -1;
8589 }
8590 q->u.flag.is_hdr = is_hdr;
8591 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
8592 q->u.flag.rq_batch = TRUE;
8593 }
8594 return 0;
8595 }
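
/*
 * Illustrative usage sketch (not part of the driver): allocating a
 * header/data RQ pair bound to the same CQ.  The entry count, buffer sizes,
 * and ULP value are hypothetical; compile-guarded example.
 */
#if 0
static int32_t
example_alloc_rq_pair(sli4_t *sli4, sli4_queue_t *hdr_rq, sli4_queue_t *data_rq,
		      sli4_queue_t *cq, uint16_t ulp)
{
	/* header RQs must get an even ID, data RQs an odd ID (checked above) */
	if (sli_fc_rq_alloc(sli4, hdr_rq, 512, 128, cq, ulp, TRUE)) {
		return -1;
	}
	if (sli_fc_rq_alloc(sli4, data_rq, 512, 2048, cq, ulp, FALSE)) {
		return -1;
	}
	return 0;
}
#endif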
8596
8597 /**
8598 * @ingroup sli_fc
8599 * @brief Allocate a receive queue set.
8600 *
8601 * @param sli4 SLI context.
8602  * @param num_rq_pairs Number of RQ pairs to create.
8603  * @param qs Pointers to the queue objects for both header and data.
8604  * The length of this array should be 2 * num_rq_pairs.
8605  * @param base_cq_id Base CQ ID; assumes CQs base_cq_id through (base_cq_id + num_rq_pairs) have been allotted.
8606  * @param n_entries Number of entries in each RQ.
8607  * @param header_buffer_size Buffer size for the header RQs.
8608  * @param payload_buffer_size Buffer size for the payload (data) RQs.
8609  * @param ulp The ULP to bind.
8610 *
8611 * @return Returns 0 on success, or -1 on failure.
8612 */
8613 int32_t
8614 sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs,
8615 sli4_queue_t *qs[], uint32_t base_cq_id,
8616 uint32_t n_entries, uint32_t header_buffer_size,
8617 uint32_t payload_buffer_size, uint16_t ulp)
8618 {
8619 uint32_t i, p, offset = 0;
8620 uint32_t payload_size, total_page_count = 0;
8621 uintptr_t addr;
8622 ocs_dma_t dma;
8623 sli4_res_common_create_queue_set_t *rsp = NULL;
8624 sli4_req_fcoe_rq_create_v2_t *req = NULL;
8625
8626 ocs_memset(&dma, 0, sizeof(dma));
8627
8628 for (i = 0; i < (num_rq_pairs * 2); i++) {
8629 if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
8630 n_entries, SLI_PAGE_SIZE)) {
8631 goto error;
8632 }
8633 }
8634
8635 total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2;
8636
8637 /* Payload length must accommodate both request and response */
8638 	payload_size = max((sizeof(sli4_req_fcoe_rq_create_v2_t) + (8 * total_page_count)),
8639 sizeof(sli4_res_common_create_queue_set_t));
8640
8641 if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
8642 ocs_log_err(sli4->os, "DMA allocation failed\n");
8643 goto error;
8644 }
8645 ocs_memset(dma.virt, 0, payload_size);
8646
8647 if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
8648 payload_size, &dma) == -1) {
8649 goto error;
8650 }
8651 req = (sli4_req_fcoe_rq_create_v2_t *)((uint8_t *)dma.virt);
8652
8653 /* Fill Header fields */
8654 req->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
8655 req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
8656 req->hdr.version = 2;
8657 req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) - sizeof(sli4_req_hdr_t)
8658 + (8 * total_page_count);
8659
8660 /* Fill Payload fields */
8661 req->dnb = TRUE;
8662 req->num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
8663 req->rqe_count = qs[0]->dma.size / SLI4_FCOE_RQE_SIZE;
8664 req->rqe_size = SLI4_FCOE_RQE_SIZE_8;
8665 req->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
8666 req->rq_count = num_rq_pairs * 2;
8667 req->base_cq_id = base_cq_id;
8668 req->hdr_buffer_size = header_buffer_size;
8669 req->payload_buffer_size = payload_buffer_size;
8670
8671 for (i = 0; i < (num_rq_pairs * 2); i++) {
8672 for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += SLI_PAGE_SIZE) {
8673 req->page_physical_address[offset].low = ocs_addr32_lo(addr);
8674 req->page_physical_address[offset].high = ocs_addr32_hi(addr);
8675 offset++;
8676 }
8677 }
8678
8679 if (sli_bmbx_command(sli4)){
8680 ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n");
8681 goto error;
8682 }
8683
8684 rsp = (void *)((uint8_t *)dma.virt);
8685 if (rsp->hdr.status) {
8686 ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n",
8687 rsp->hdr.status, rsp->hdr.additional_status);
8688 goto error;
8689 } else {
8690 for (i = 0; i < (num_rq_pairs * 2); i++) {
8691 qs[i]->id = i + rsp->q_id;
8692 if ((qs[i]->id & 1) == 0) {
8693 qs[i]->u.flag.is_hdr = TRUE;
8694 } else {
8695 qs[i]->u.flag.is_hdr = FALSE;
8696 }
8697 qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
8698 qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
8699 }
8700 }
8701
8702 ocs_dma_free(sli4->os, &dma);
8703
8704 return 0;
8705
8706 error:
8707 for (i = 0; i < (num_rq_pairs * 2); i++) {
8708 if (qs[i]->dma.size) {
8709 ocs_dma_free(sli4->os, &qs[i]->dma);
8710 }
8711 }
8712
8713 if (dma.size) {
8714 ocs_dma_free(sli4->os, &dma);
8715 }
8716
8717 return -1;
8718 }
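
/*
 * Illustrative usage sketch (not part of the driver): creating an RQ set of
 * two header/data pairs whose completions land on consecutive CQs starting at
 * base_cq_id.  Entry counts and buffer sizes are hypothetical; compile-guarded.
 */
#if 0
static int32_t
example_alloc_rq_set(sli4_t *sli4, sli4_queue_t *qs[4], uint32_t base_cq_id,
		     uint16_t ulp)
{
	/* header/data roles are assigned inside, from the returned queue IDs */
	return sli_fc_rq_set_alloc(sli4, 2, qs, base_cq_id, 512, 128, 2048, ulp);
}
#endif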
8719
8720 /**
8721 * @ingroup sli_fc
8722 * @brief Get the RPI resource requirements.
8723 *
8724 * @param sli4 SLI context.
8725 * @param n_rpi Number of RPIs desired.
8726 *
8727 * @return Returns the number of bytes needed. This value may be zero.
8728 */
8729 uint32_t
8730 sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi)
8731 {
8732 uint32_t bytes = 0;
8733
8734 /* Check if header templates needed */
8735 if (sli4->config.hdr_template_req) {
8736 /* round up to a page */
8737 bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE);
8738 }
8739
8740 return bytes;
8741 }
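
/*
 * Illustrative usage sketch (not part of the driver): sizing and allocating
 * the RPI header template memory when the SLI port requires it.  The RPI
 * count and DMA object are hypothetical caller-owned values; compile-guarded.
 */
#if 0
static int32_t
example_alloc_rpi_headers(sli4_t *sli4, ocs_dma_t *dma, uint32_t n_rpi)
{
	uint32_t bytes = sli_fc_get_rpi_requirements(sli4, n_rpi);

	if (bytes == 0) {
		/* header templates not required on this port */
		return 0;
	}
	return ocs_dma_alloc(sli4->os, dma, bytes, SLI_PAGE_SIZE);
}
#endif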
8742
8743 /**
8744 * @ingroup sli_fc
8745 * @brief Return a text string corresponding to a CQE status value
8746 *
8747 * @param status Status value
8748 *
8749 * @return Returns corresponding string, otherwise "unknown"
8750 */
8751 const char *
8752 sli_fc_get_status_string(uint32_t status)
8753 {
8754 static struct {
8755 uint32_t code;
8756 const char *label;
8757 } lookup[] = {
8758 {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
8759 {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
8760 {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
8761 {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
8762 {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
8763 {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
8764 {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
8765 {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
8766 {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
8767 {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
8768 {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
8769 {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
8770 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"},
8771 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
8772 {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
8773 {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
8774 {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
8775 {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
8776 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"},
8777 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
8778 {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
8779 {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
8780 };
8781 uint32_t i;
8782
8783 for (i = 0; i < ARRAY_SIZE(lookup); i++) {
8784 if (status == lookup[i].code) {
8785 return lookup[i].label;
8786 }
8787 }
8788 return "unknown";
8789 }