1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 1994-2009 The FreeBSD Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
25 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of the FreeBSD Project.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_mfi.h"
38
39 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/bio.h>
46 #include <sys/ioccom.h>
47 #include <sys/eventhandler.h>
48 #include <sys/callout.h>
49 #include <sys/uio.h>
50 #include <machine/bus.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54
55 #include <dev/mfi/mfireg.h>
56 #include <dev/mfi/mfi_ioctl.h>
57 #include <dev/mfi/mfivar.h>
58
59 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *);
60 union mfi_mpi2_request_descriptor *
61 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
62 void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
63 int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
64 struct mfi_cmd_tbolt *cmd);
65 union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
66 *sc, struct mfi_command *cmd);
67 uint8_t
68 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
69 union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
70 *sc, struct mfi_command *mfi_cmd);
71 void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
72 struct mfi_cmd_tbolt *cmd);
73 static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
74 *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
75 void
76 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
77 uint8_t ext_status);
78 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
79 static void mfi_kill_hba (struct mfi_softc *sc);
80 static void mfi_process_fw_state_chg_isr(void *arg);
81 static void mfi_sync_map_complete(struct mfi_command *);
82 static void mfi_queue_map_sync(struct mfi_softc *sc);
83
84 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
85
86 extern int mfi_polled_cmd_timeout;
87 static int mfi_fw_reset_test = 0;
88 #ifdef MFI_DEBUG
89 SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
90 0, "Force a firmware reset condition");
91 #endif
92
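/*
 * Interrupt mask helpers for Thunderbolt-class controllers.  Enabling
 * unmasks only the fusion reply interrupt in the outbound mask register;
 * disabling masks everything.  The read-back of MFI_OMSK flushes the
 * posted register write.
 */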
93 void
94 mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
95 {
96 MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
97 MFI_READ4(sc, MFI_OMSK);
98 }
99
100 void
101 mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
102 {
103 MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
104 MFI_READ4(sc, MFI_OMSK);
105 }
106
107 int32_t
108 mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
109 {
110 return MFI_READ4(sc, MFI_OSP0);
111 }
112
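/*
 * Read and acknowledge the outbound interrupt status register.  Returns
 * MFI_FIRMWARE_STATE_CHANGE if the firmware signalled a state change,
 * 0 for a normal reply interrupt, and 1 if the interrupt was not raised
 * by this controller.
 */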
113 int32_t
114 mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
115 {
116 int32_t status, mfi_status = 0;
117
118 status = MFI_READ4(sc, MFI_OSTS);
119
120 if (status & 1) {
121 MFI_WRITE4(sc, MFI_OSTS, status);
122 MFI_READ4(sc, MFI_OSTS);
123 if (status & MFI_STATE_CHANGE_INTERRUPT) {
124 mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
125 }
126
127 return mfi_status;
128 }
129 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
130 return 1;
131
132 MFI_READ4(sc, MFI_OSTS);
133 return 0;
134 }
135
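/*
 * Post a legacy MFI frame to a Thunderbolt controller: tag the frame's
 * bus address as an MFA-type request descriptor and write the 64-bit
 * descriptor to the inbound queue port, low dword first.
 */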
136 void
137 mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
138 uint32_t frame_cnt)
139 {
140 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
141 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
142 MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
143 MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
144 }
145
146 /*
147 * mfi_tbolt_adp_reset - reset the controller
148 * @sc: adapter softc
149 */
150 int
151 mfi_tbolt_adp_reset(struct mfi_softc *sc)
152 {
153 int retry = 0, i = 0;
154 int HostDiag;
155
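	/*
	 * Unlock the host diagnostic register by writing the magic key
	 * sequence to the write-sequence register; the loop below waits
	 * for DIAG_WRITE_ENABLE to confirm the unlock.
	 */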
156 MFI_WRITE4(sc, MFI_WSR, 0xF);
157 MFI_WRITE4(sc, MFI_WSR, 4);
158 MFI_WRITE4(sc, MFI_WSR, 0xB);
159 MFI_WRITE4(sc, MFI_WSR, 2);
160 MFI_WRITE4(sc, MFI_WSR, 7);
161 MFI_WRITE4(sc, MFI_WSR, 0xD);
162
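	/* Crude spin delay; the empty loop bodies here are intentional. */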
163 for (i = 0; i < 10000; i++) ;
164
165 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
166
167 while (!( HostDiag & DIAG_WRITE_ENABLE)) {
168 for (i = 0; i < 1000; i++);
169 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
170 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
171 "hostdiag=%#x\n", retry, HostDiag);
172
173 if (retry++ >= 100)
174 return 1;
175 }
176
177 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);
178
179 MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
180
181 for (i=0; i < 10; i++) {
182 for (i = 0; i < 10000; i++);
183 }
184
185 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
186 while (HostDiag & DIAG_RESET_ADAPTER) {
187 for (i = 0; i < 1000; i++) ;
188 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
189 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
190 "hostdiag=%#x\n", retry, HostDiag);
191
192 if (retry++ >= 1000)
193 return 1;
194 }
195 return 0;
196 }
197
198 /*
199 * This routine initializes Thunderbolt-specific device information
200 */
201 void
202 mfi_tbolt_init_globals(struct mfi_softc *sc)
203 {
204 /* Initialize single reply size and Message size */
205 sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
206 sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
207
208 /*
209 * Calculate how many SGEs are allowed in an allocated main message:
210 * (size of the message - RAID SCSI IO message size (excluding the SGE))
211 * / size of an SGE
212 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
213 */
214 sc->max_SGEs_in_main_message =
215 (uint8_t)((sc->raid_io_msg_size
216 - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
217 - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
218 /*
219 * (Command frame size allocated in the SRB ext - RAID SCSI IO message
220 * size) / size of an SGE;
221 * (1280 - 256) / 16 = 64
222 */
223 sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
224 - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
225 /*
226 * (0x08 - 1) + 0x40 = 0x47; 0x47 - 0x01 = 0x46, one SGE is left for
227 * command coalescing
228 */
229 sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
230 + sc->max_SGEs_in_chain_message - 1;
231 /*
232 * This is the offset in number of 4 * 32bit words to the next chain
233 * (0x100 - 0x10)/0x10 = 0xF(15)
234 */
235 sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
236 - sizeof(MPI2_SGE_IO_UNION))/16;
237 sc->chain_offset_value_for_mpt_ptmsg
238 = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
239 sc->mfi_cmd_pool_tbolt = NULL;
240 sc->request_desc_pool = NULL;
241 }
242
243 /*
244 * This function calculates the memory requirement for the Thunderbolt
245 * controller and returns the total required memory in bytes
246 */
247
248 uint32_t
249 mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
250 {
251 uint32_t size;
252 size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; /* for Alignment */
253 size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
254 size += sc->reply_size * sc->mfi_max_fw_cmds;
255 /* this is for SGL's */
256 size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
257 return size;
258 }
259
260 /*
261 * Description:
262 * This function will prepare message pools for the Thunderbolt controller
263 * Arguments:
264 * DevExt - HBA miniport driver's adapter data storage structure
265 * pMemLocation - start of the memory allocated for Thunderbolt.
266 * Return Value:
267 * TRUE if successful
268 * FALSE if failed
269 */
270 int
271 mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
272 uint32_t tbolt_contg_length)
273 {
274 uint32_t offset = 0;
275 uint8_t *addr = mem_location;
276
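	/*
	 * The single contiguous DMA buffer is carved up in order: the
	 * 256-byte aligned request message pool, the reply frame pool and
	 * the per-command SG chain frames.  Bus addresses are derived from
	 * the same offsets so the virtual and physical layouts stay in sync.
	 */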
277 /* Request Descriptor Base physical Address */
278
279 /* For Request Descriptors Virtual Memory */
280 /* Initialise the aligned IO Frames Virtual Memory Pointer */
281 if (((uintptr_t)addr) & (0xFF)) {
282 addr = &addr[sc->raid_io_msg_size];
283 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
284 sc->request_message_pool_align = addr;
285 } else
286 sc->request_message_pool_align = addr;
287
288 offset = sc->request_message_pool_align - sc->request_message_pool;
289 sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
290
291 /* DJA XXX should this be bus dma ??? */
292 /* Skip request message pool */
293 addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
294 /* Reply Frame Pool is initialized */
295 sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
296 if (((uintptr_t)addr) & (0xFF)) {
297 addr = &addr[sc->reply_size];
298 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
299 }
300 sc->reply_frame_pool_align
301 = (struct mfi_mpi2_reply_header *)addr;
302
303 offset = (uintptr_t)sc->reply_frame_pool_align
304 - (uintptr_t)sc->request_message_pool;
305 sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
306
307 /* Skip Reply Frame Pool */
308 addr += sc->reply_size * sc->mfi_max_fw_cmds;
309 sc->reply_pool_limit = addr;
310
311 /* initializing reply address to 0xFFFFFFFF */
312 memset((uint8_t *)sc->reply_frame_pool, 0xFF,
313 (sc->reply_size * sc->mfi_max_fw_cmds));
314
315 offset = sc->reply_size * sc->mfi_max_fw_cmds;
316 sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
317 /* initialize the last_reply_idx to 0 */
318 sc->last_reply_idx = 0;
319 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
320 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
321 offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
322 sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
323 if (offset > tbolt_contg_length)
324 device_printf(sc->mfi_dev, "Error: Initialized more than "
325 "allocated\n");
326 return 0;
327 }
328
329 /*
330 * This routine prepares and issues the INIT2 frame to the firmware
331 */
332
333 int
334 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
335 {
336 struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
337 struct mfi_init_frame *mfi_init;
338 uintptr_t offset = 0;
339 bus_addr_t phyAddress;
340 MFI_ADDRESS *mfiAddressTemp;
341 struct mfi_command *cm, cmd_tmp;
342 int error;
343
344 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
345
346 /* Check if initialization is already completed */
347 if (sc->MFA_enabled) {
348 device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
349 return 1;
350 }
351
352 if ((cm = mfi_dequeue_free(sc)) == NULL) {
353 device_printf(sc->mfi_dev, "tbolt_init failed to get command "
354 " entry!\n");
355 return (EBUSY);
356 }
357
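	/*
	 * Save the command's regular frame, bus address and DMA map and
	 * temporarily point it at the preallocated IOC INIT frame; the
	 * originals are restored at the "out" label below.
	 */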
358 cmd_tmp.cm_frame = cm->cm_frame;
359 cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
360 cmd_tmp.cm_dmamap = cm->cm_dmamap;
361
362 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
363 cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
364 cm->cm_dmamap = sc->mfi_tb_init_dmamap;
365 cm->cm_frame->header.context = 0;
366
367 /*
368 * Abuse the SG list area of the frame to hold the init_qinfo
369 * object.
370 */
371 mfi_init = &cm->cm_frame->init;
372
373 mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
374 bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
375 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
376 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
377
378 /* set MsgVersion and HeaderVersion host driver was built with */
379 mpi2IocInit->MsgVersion = MPI2_VERSION;
380 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
381 mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
382 mpi2IocInit->ReplyDescriptorPostQueueDepth
383 = (uint16_t)sc->mfi_max_fw_cmds;
384 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
385
386 /* Get physical address of reply frame pool */
387 offset = (uintptr_t) sc->reply_frame_pool_align
388 - (uintptr_t)sc->request_message_pool;
389 phyAddress = sc->mfi_tb_busaddr + offset;
390 mfiAddressTemp =
391 (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
392 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
393 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
394
395 /* Get physical address of request message pool */
396 offset = sc->request_message_pool_align - sc->request_message_pool;
397 phyAddress = sc->mfi_tb_busaddr + offset;
398 mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
399 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
400 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
401 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
402 mpi2IocInit->TimeStamp = time_uptime;
403
404 if (sc->verbuf) {
405 snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
406 MEGASAS_VERSION);
407 mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
408 mfi_init->driver_ver_hi =
409 (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
410 }
411 /* Get the physical address of the mpi2 ioc init command */
412 phyAddress = sc->mfi_tb_ioc_init_busaddr;
413 mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
414 mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
415 mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
416
417 mfi_init->header.cmd = MFI_CMD_INIT;
418 mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
419 mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
420
421 cm->cm_data = NULL;
422 cm->cm_flags |= MFI_CMD_POLLED;
423 cm->cm_timestamp = time_uptime;
424 if ((error = mfi_mapcmd(sc, cm)) != 0) {
425 device_printf(sc->mfi_dev, "failed to send IOC init2 "
426 "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
427 goto out;
428 }
429
430 if (mfi_init->header.cmd_status == MFI_STAT_OK) {
431 sc->MFA_enabled = 1;
432 } else {
433 device_printf(sc->mfi_dev, "Init command Failed %#x\n",
434 mfi_init->header.cmd_status);
435 error = mfi_init->header.cmd_status;
436 goto out;
437 }
438
439 out:
440 cm->cm_frame = cmd_tmp.cm_frame;
441 cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
442 cm->cm_dmamap = cmd_tmp.cm_dmamap;
443 mfi_release_command(cm);
444
445 return (error);
446
447 }
448
449 int
450 mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
451 {
452 struct mfi_cmd_tbolt *cmd;
453 bus_addr_t io_req_base_phys;
454 uint8_t *io_req_base;
455 int i = 0, j = 0, offset = 0;
456
457 /*
458 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
459 * Allocate the dynamic array first and then allocate individual
460 * commands.
461 */
462 sc->request_desc_pool = malloc(sizeof(
463 union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
464 M_MFIBUF, M_NOWAIT|M_ZERO);
465
466 if (sc->request_desc_pool == NULL) {
467 device_printf(sc->mfi_dev, "Could not alloc "
468 "memory for request_desc_pool\n");
469 return (ENOMEM);
470 }
471
472 sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
473 * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
474
475 if (sc->mfi_cmd_pool_tbolt == NULL) {
476 free(sc->request_desc_pool, M_MFIBUF);
477 device_printf(sc->mfi_dev, "Could not alloc "
478 "memory for cmd_pool_tbolt\n");
479 return (ENOMEM);
480 }
481
482 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
483 sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
484 struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
485
486 if (!sc->mfi_cmd_pool_tbolt[i]) {
487 device_printf(sc->mfi_dev, "Could not alloc "
488 "cmd_pool_tbolt entry\n");
489
490 for (j = 0; j < i; j++)
491 free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
492
493 free(sc->request_desc_pool, M_MFIBUF);
494 sc->request_desc_pool = NULL;
495 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
496 sc->mfi_cmd_pool_tbolt = NULL;
497
498 return (ENOMEM);
499 }
500 }
501
502 /*
503 * The first 256 bytes (SMID 0) are not used.  Don't add them to the
504 * cmd list.
505 */
506 io_req_base = sc->request_message_pool_align
507 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
508 io_req_base_phys = sc->request_msg_busaddr
509 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
510
511 /*
512 * Add all the commands to command pool (instance->cmd_pool)
513 */
514 /* SMID 0 is reserved. Set SMID/index from 1 */
515
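	/*
	 * Each Thunderbolt command gets one request descriptor, one I/O
	 * request frame (virtual and bus address) and one SG chain frame,
	 * all indexed by SMID - 1.  sync_cmd_idx is set to mfi_max_fw_cmds
	 * to mark the command as not yet bound to an MFI command.
	 */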
516 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
517 cmd = sc->mfi_cmd_pool_tbolt[i];
518 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
519 cmd->index = i + 1;
520 cmd->request_desc = (union mfi_mpi2_request_descriptor *)
521 (sc->request_desc_pool + i);
522 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
523 (io_req_base + offset);
524 cmd->io_request_phys_addr = io_req_base_phys + offset;
525 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
526 + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
527 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
528 * MEGASAS_MAX_SZ_CHAIN_FRAME;
529 cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
530
531 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
532 }
533 return 0;
534 }
535
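/*
 * mfi_tbolt_reset - check whether the firmware is faulted (or a reset was
 * forced via the fw_reset_test sysctl) and, if online controller reset is
 * permitted, start the adapter reset state machine.  Returns 0 if a
 * firmware fault was detected, 1 otherwise.
 */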
536 int
537 mfi_tbolt_reset(struct mfi_softc *sc)
538 {
539 uint32_t fw_state;
540
541 mtx_lock(&sc->mfi_io_lock);
542 if (sc->hw_crit_error) {
543 device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
544 mtx_unlock(&sc->mfi_io_lock);
545 return 1;
546 }
547
548 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
549 fw_state = sc->mfi_read_fw_status(sc);
550 if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
551 mfi_fw_reset_test) {
552 if ((sc->disableOnlineCtrlReset == 0)
553 && (sc->adpreset == 0)) {
554 device_printf(sc->mfi_dev, "Adapter RESET "
555 "condition is detected\n");
556 sc->adpreset = 1;
557 sc->issuepend_done = 0;
558 sc->MFA_enabled = 0;
559 sc->last_reply_idx = 0;
560 mfi_process_fw_state_chg_isr((void *) sc);
561 }
562 mtx_unlock(&sc->mfi_io_lock);
563 return 0;
564 }
565 }
566 mtx_unlock(&sc->mfi_io_lock);
567 return 1;
568 }
569
570 /*
571 * mfi_intr_tbolt - isr entry point
572 */
573 void
574 mfi_intr_tbolt(void *arg)
575 {
576 struct mfi_softc *sc = (struct mfi_softc *)arg;
577
578 if (sc->mfi_check_clear_intr(sc) == 1) {
579 return;
580 }
581 if (sc->mfi_detaching)
582 return;
583 mtx_lock(&sc->mfi_io_lock);
584 mfi_tbolt_complete_cmd(sc);
585 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
586 mfi_startio(sc);
587 mtx_unlock(&sc->mfi_io_lock);
588 return;
589 }
590
591 /*
592 * map_tbolt_cmd_status - Maps the FW cmd status to the OS cmd status
593 * @mfi_cmd : Pointer to the cmd
594 * @status : status of cmd returned by FW
595 * @ext_status : ext status of cmd returned by FW
596 */
597
598 void
599 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
600 uint8_t ext_status)
601 {
602 switch (status) {
603 case MFI_STAT_OK:
604 mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
605 mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
606 mfi_cmd->cm_error = MFI_STAT_OK;
607 break;
608
609 case MFI_STAT_SCSI_IO_FAILED:
610 case MFI_STAT_LD_INIT_IN_PROGRESS:
611 mfi_cmd->cm_frame->header.cmd_status = status;
612 mfi_cmd->cm_frame->header.scsi_status = ext_status;
613 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
614 mfi_cmd->cm_frame->dcmd.header.scsi_status
615 = ext_status;
616 break;
617
618 case MFI_STAT_SCSI_DONE_WITH_ERROR:
619 mfi_cmd->cm_frame->header.cmd_status = ext_status;
620 mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
621 break;
622
623 case MFI_STAT_LD_OFFLINE:
624 case MFI_STAT_DEVICE_NOT_FOUND:
625 mfi_cmd->cm_frame->header.cmd_status = status;
626 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
627 break;
628
629 default:
630 mfi_cmd->cm_frame->header.cmd_status = status;
631 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
632 break;
633 }
634 }
635
636 /*
637 * mfi_tbolt_return_cmd - Return a cmd to the free command pool
638 * @sc: Adapter soft state
639 * @tbolt_cmd: Tbolt command packet to be returned to the free command pool
640 * @mfi_cmd: Owning MFI command packet
641 */
642 void
643 mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
644 struct mfi_command *mfi_cmd)
645 {
646 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
647
648 mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
649 mfi_cmd->cm_extra_frames = 0;
650 tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
651
652 TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
653 }
654
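/*
 * Drain the reply descriptor queue: starting at last_reply_idx, map each
 * completed SMID back to its owning MFI command, propagate the completion
 * status and complete the command.  Consumed descriptors are reset to all
 * ones and the updated reply index is posted back to the controller.
 */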
655 void
656 mfi_tbolt_complete_cmd(struct mfi_softc *sc)
657 {
658 struct mfi_mpi2_reply_header *desc, *reply_desc;
659 struct mfi_command *cmd_mfi; /* For MFA Cmds */
660 struct mfi_cmd_tbolt *cmd_tbolt;
661 uint16_t smid;
662 uint8_t reply_descript_type;
663 uint32_t status, extStatus;
664 uint16_t num_completed;
665 union desc_value val;
666 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
667
668 desc = (struct mfi_mpi2_reply_header *)
669 ((uintptr_t)sc->reply_frame_pool_align
670 + sc->last_reply_idx * sc->reply_size);
671 reply_desc = desc;
672
673 if (reply_desc == NULL) {
674 device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
675 return;
676 }
677
678 reply_descript_type = reply_desc->ReplyFlags
679 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
680 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
681 return;
682
683 num_completed = 0;
684 val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
685
686 /* Read Reply descriptor */
687 while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
688 smid = reply_desc->SMID;
689 if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
690 device_printf(sc->mfi_dev, "smid is %d cannot "
691 "proceed - skipping\n", smid);
692 goto next;
693 }
694 cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
695 if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
696 device_printf(sc->mfi_dev, "cmd_tbolt %p "
697 "has invalid sync_cmd_idx=%d - skipping\n",
698 cmd_tbolt, cmd_tbolt->sync_cmd_idx);
699 goto next;
700 }
701 cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
702
703 status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
704 extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
705 map_tbolt_cmd_status(cmd_mfi, status, extStatus);
706
707 /* mfi_tbolt_return_cmd is handled by mfi complete / return */
708 if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
709 (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
710 /* polled LD/SYSPD IO command */
711 /* XXX mark okay for now DJA */
712 cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
713
714 } else {
715 /* remove command from busy queue if not polled */
716 if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
717 mfi_remove_busy(cmd_mfi);
718
719 /* complete the command */
720 mfi_complete(sc, cmd_mfi);
721 }
722
723 next:
724 sc->last_reply_idx++;
725 if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
726 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
727 sc->last_reply_idx = 0;
728 }
729
730 /* Set it back to all ones (0xFF) */
731 ((union mfi_mpi2_reply_descriptor*)desc)->words =
732 ~((uint64_t)0x00);
733
734 num_completed++;
735
736 /* Get the next reply descriptor */
737 desc = (struct mfi_mpi2_reply_header *)
738 ((uintptr_t)sc->reply_frame_pool_align
739 + sc->last_reply_idx * sc->reply_size);
740 reply_desc = desc;
741 val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
742 reply_descript_type = reply_desc->ReplyFlags
743 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
744 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
745 break;
746 }
747
748 if (!num_completed)
749 return;
750
751 /* update replyIndex to FW */
752 if (sc->last_reply_idx)
753 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
754
755 return;
756 }
757
758 /*
759 * mfi_tbolt_get_cmd - Get a command from the free pool
760 * @sc: Adapter soft state
761 *
762 * Returns a free command from the pool
763 */
764
765 struct mfi_cmd_tbolt *
766 mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
767 {
768 struct mfi_cmd_tbolt *cmd = NULL;
769
770 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
771
772 if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
773 return (NULL);
774 TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
775 memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
776 memset((uint8_t *)cmd->io_request, 0,
777 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
778
779 cmd->sync_cmd_idx = mfi_cmd->cm_index;
780 mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
781 mfi_cmd->cm_flags |= MFI_CMD_TBOLT;
782
783 return cmd;
784 }
785
786 union mfi_mpi2_request_descriptor *
787 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
788 {
789 uint8_t *p;
790
791 if (index >= sc->mfi_max_fw_cmds) {
792 device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
793 "for descriptor\n", index);
794 return NULL;
795 }
796 p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
797 * index;
798 memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
799 return (union mfi_mpi2_request_descriptor *)p;
800 }
801
802 /* Used to build IOCTL cmd */
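/*
 * The MPT request simply wraps the legacy MFI frame: the single IEEE
 * chain element points at the MFI frame's bus address so the firmware
 * fetches the original frame through the pass-through request.
 */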
803 uint8_t
804 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
805 {
806 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
807 struct mfi_mpi2_request_raid_scsi_io *io_req;
808 struct mfi_cmd_tbolt *cmd;
809
810 cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
811 if (!cmd)
812 return EBUSY;
813 io_req = cmd->io_request;
814 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
815
816 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
817 io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
818 SGL) / 4;
819 io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
820
821 mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
822
823 /*
824  * In MFI pass-through, nextChainOffset will always be zero to
825  * indicate the end of the chain.
826  */
827 mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
828 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
829
830 /* setting the length to the maximum length */
831 mpi25_ieee_chain->Length = 1024;
832
833 return 0;
834 }
835
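/*
 * mfi_tbolt_build_ldio - translate a legacy MFI LD read/write frame into
 * a Thunderbolt RAID SCSI I/O request.  The LBA and block count are taken
 * from the MFI I/O frame and the request descriptor is tagged as LD I/O.
 */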
836 void
837 mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
838 struct mfi_cmd_tbolt *cmd)
839 {
840 uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
841 struct mfi_mpi2_request_raid_scsi_io *io_request;
842 struct IO_REQUEST_INFO io_info;
843
844 device_id = mfi_cmd->cm_frame->io.header.target_id;
845 io_request = cmd->io_request;
846 io_request->RaidContext.TargetID = device_id;
847 io_request->RaidContext.Status = 0;
848 io_request->RaidContext.exStatus = 0;
849 io_request->RaidContext.regLockFlags = 0;
850
851 start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
852 start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
853
854 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
855 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
856 io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
857 io_info.ldTgtId = device_id;
858 if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
859 MFI_FRAME_DIR_READ)
860 io_info.isRead = 1;
861
862 io_request->RaidContext.timeoutValue
863 = MFI_FUSION_FP_DEFAULT_TIMEOUT;
864 io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
865 io_request->DevHandle = device_id;
866 cmd->request_desc->header.RequestFlags
867 = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
868 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
869 if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
870 io_request->RaidContext.RegLockLength = 0x100;
871 io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
872 * MFI_SECTOR_LEN;
873 }
874
875 int
876 mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
877 struct mfi_cmd_tbolt *cmd)
878 {
879 struct mfi_mpi2_request_raid_scsi_io *io_request;
880 uint32_t sge_count;
881 uint8_t cdb_len;
882 int readop;
883 u_int64_t lba;
884
885 io_request = cmd->io_request;
886 if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
887 || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
888 return 1;
889
890 mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);
891
892 /* Convert to SCSI command CDB */
893 bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
894 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
895 readop = 0;
896 else
897 readop = 1;
898
899 lba = mfi_cmd->cm_frame->io.lba_hi;
900 lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
901 cdb_len = mfi_build_cdb(readop, 0, lba,
902 mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);
903
904 /* Just the CDB length, rest of the Flags are zero */
905 io_request->IoFlags = cdb_len;
906
907 /*
908 * Construct SGL
909 */
910 sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
911 (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
912 if (sge_count > sc->mfi_max_sge) {
913 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
914 "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
915 return 1;
916 }
917 io_request->RaidContext.numSGE = sge_count;
918 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
919
920 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
921 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
922 else
923 io_request->Control = MPI2_SCSIIO_CONTROL_READ;
924
925 io_request->SGLOffset0 = offsetof(
926 struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
927
928 io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
929 io_request->SenseBufferLength = MFI_SENSE_LEN;
930 io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
931 io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;
932
933 return 0;
934 }
935
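/*
 * Build the IEEE SGL for a Thunderbolt command.  If all SGEs fit they go
 * directly into the main frame; otherwise the last main-frame slot becomes
 * a chain element pointing at the command's preallocated SG chain frame,
 * which holds the remaining entries.  Returns the number of data SGEs.
 */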
936 static int
937 mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
938 pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
939 {
940 uint8_t i, sg_processed;
941 uint8_t sge_count, sge_idx;
942 union mfi_sgl *os_sgl;
943 pMpi25IeeeSgeChain64_t sgl_end;
944
945 /*
946 * Return 0 if there is no data transfer
947 */
948 if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
949 device_printf(sc->mfi_dev, "Buffer empty \n");
950 return 0;
951 }
952 os_sgl = mfi_cmd->cm_sg;
953 sge_count = mfi_cmd->cm_frame->header.sg_count;
954
955 if (sge_count > sc->mfi_max_sge) {
956 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
957 os_sgl, sge_count);
958 return sge_count;
959 }
960
961 if (sge_count > sc->max_SGEs_in_main_message)
962 /* One element to store the chain info */
963 sge_idx = sc->max_SGEs_in_main_message - 1;
964 else
965 sge_idx = sge_count;
966
967 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
968 sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
969 sgl_end->Flags = 0;
970 }
971
972 for (i = 0; i < sge_idx; i++) {
973 /*
974 * On 32-bit BSD we get 32-bit SGLs from the OS,
975 * but the FW only takes 64-bit SGLs, so copy from
976 * the 32-bit SGLs to 64-bit ones.
977 */
978 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
979 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
980 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
981 } else {
982 sgl_ptr->Length = os_sgl->sg32[i].len;
983 sgl_ptr->Address = os_sgl->sg32[i].addr;
984 }
985 if (i == sge_count - 1 &&
986 (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
987 sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
988 else
989 sgl_ptr->Flags = 0;
990 sgl_ptr++;
991 cmd->io_request->ChainOffset = 0;
992 }
993
994 sg_processed = i;
995
996 if (sg_processed < sge_count) {
997 pMpi25IeeeSgeChain64_t sg_chain;
998
999 cmd->io_request->ChainOffset =
1000 sc->chain_offset_value_for_main_message;
1001 sg_chain = sgl_ptr;
1002 /* Prepare chain element */
1003 sg_chain->NextChainOffset = 0;
1004 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
1005 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1006 else
1007 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1008 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
1009 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
1010 (sge_count - sg_processed));
1011 sg_chain->Address = cmd->sg_frame_phys_addr;
1012 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1013 for (; i < sge_count; i++) {
1014 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1015 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1016 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1017 } else {
1018 sgl_ptr->Length = os_sgl->sg32[i].len;
1019 sgl_ptr->Address = os_sgl->sg32[i].addr;
1020 }
1021 if (i == sge_count - 1 &&
1022 (sc->mfi_flags &
1023 (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
1024 sgl_ptr->Flags =
1025 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1026 else
1027 sgl_ptr->Flags = 0;
1028 sgl_ptr++;
1029 }
1030 }
1031 return sge_count;
1032 }
1033
1034 union mfi_mpi2_request_descriptor *
1035 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1036 {
1037 struct mfi_cmd_tbolt *cmd;
1038 union mfi_mpi2_request_descriptor *req_desc = NULL;
1039 uint16_t index;
1040 cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
1041 if (cmd == NULL)
1042 return (NULL);
1043
1044 index = cmd->index;
1045 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1046 if (req_desc == NULL) {
1047 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1048 return (NULL);
1049 }
1050
1051 if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
1052 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1053 return (NULL);
1054 }
1055 req_desc->header.SMID = index;
1056 return req_desc;
1057 }
1058
1059 union mfi_mpi2_request_descriptor *
1060 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1061 {
1062 union mfi_mpi2_request_descriptor *req_desc = NULL;
1063 uint16_t index;
1064 if (mfi_build_mpt_pass_thru(sc, cmd)) {
1065 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1066 "cmd\n");
1067 return NULL;
1068 }
1069 /* For fusion the frame_count variable is used for SMID */
1070 index = cmd->cm_extra_frames;
1071
1072 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1073 if (req_desc == NULL)
1074 return NULL;
1075
1076 bzero(req_desc, sizeof(*req_desc));
1077 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1078 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1079 req_desc->header.SMID = index;
1080 return req_desc;
1081 }
1082
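/*
 * mfi_tbolt_send_frame - post a command to the controller.  Legacy MFI
 * frames (DCMDs, PD pass-through) are wrapped in an MPT pass-through
 * descriptor while LD I/O is translated into a native MPT request.
 * Polled commands are busy-waited on here until cmd_status changes or
 * the polled command timeout expires.
 */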
1083 int
1084 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1085 {
1086 struct mfi_frame_header *hdr;
1087 union mfi_mpi2_request_descriptor *req_desc = NULL;
1088 int tm = mfi_polled_cmd_timeout * 1000;
1089
1090 hdr = &cm->cm_frame->header;
1091 if (sc->adpreset)
1092 return 1;
1093 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1094 cm->cm_timestamp = time_uptime;
1095 mfi_enqueue_busy(cm);
1096 } else { /* still get interrupts for it */
1097 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1098 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1099 }
1100
1101 if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1102 /* check for inquiry commands coming from CLI */
1103 if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1104 NULL) {
1105 device_printf(sc->mfi_dev, "Mapping from MFI "
1106 "to MPT Failed \n");
1107 return 1;
1108 }
1109 } else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1110 hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
1111 cm->cm_flags |= MFI_CMD_SCSI;
1112 if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1113 device_printf(sc->mfi_dev, "LDIO Failed \n");
1114 return 1;
1115 }
1116 } else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1117 device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
1118 return (1);
1119 }
1120
1121 if (cm->cm_flags & MFI_CMD_SCSI) {
1122 /*
1123 * LD IO needs to be posted since it doesn't get
1124 * acknowledged via a status update so have the
1125 * controller reply via mfi_tbolt_complete_cmd.
1126 */
1127 hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1128 }
1129
1130 MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
1131 MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1132
1133 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1134 return 0;
1135
1136 /*
1137 * This is a polled command, so busy-wait for it to complete.
1138 *
1139 * The value of hdr->cmd_status is updated directly by the hardware
1140 * so there is no guarantee that mfi_tbolt_complete_cmd is called
1141 * prior to this value changing.
1142 */
1143 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1144 DELAY(1000);
1145 tm -= 1;
1146 if (tm <= 0)
1147 break;
1148 if (cm->cm_flags & MFI_CMD_SCSI) {
1149 /*
1150 * Force check reply queue.
1151 * This ensures that dump works correctly
1152 */
1153 mfi_tbolt_complete_cmd(sc);
1154 }
1155 }
1156
1157 /* ensure the command cleanup has been processed before returning */
1158 mfi_tbolt_complete_cmd(sc);
1159
1160 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1161 device_printf(sc->mfi_dev, "Frame %p timed out "
1162 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1163 return (ETIMEDOUT);
1164 }
1165 return 0;
1166 }
1167
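/*
 * After an adapter reset, take every command that was on the busy queue
 * and requeue it (event-wait DCMDs are simply released) so it is reissued
 * against the reinitialized firmware.  A command that keeps triggering
 * resets indicates a fatal condition, in which case the HBA is taken
 * offline instead.
 */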
1168 static void
1169 mfi_issue_pending_cmds_again(struct mfi_softc *sc)
1170 {
1171 struct mfi_command *cm, *tmp;
1172 struct mfi_cmd_tbolt *cmd;
1173
1174 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1175 TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
1176 cm->retry_for_fw_reset++;
1177
1178 /*
1179 * If a command has been retried multiple times and keeps causing
1180 * a FW reset condition, no further recoveries should be performed
1181 * on the controller
1182 */
1183 if (cm->retry_for_fw_reset == 3) {
1184 device_printf(sc->mfi_dev, "megaraid_sas: command %p "
1185 "index=%d was tried multiple times during adapter "
1186 "reset - Shutting down the HBA\n", cm, cm->cm_index);
1187 mfi_kill_hba(sc);
1188 sc->hw_crit_error = 1;
1189 return;
1190 }
1191
1192 mfi_remove_busy(cm);
1193 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
1194 if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
1195 sc->mfi_max_fw_cmds) {
1196 cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
1197 mfi_tbolt_return_cmd(sc, cmd, cm);
1198 } else {
1199 device_printf(sc->mfi_dev,
1200 "Invalid extra_frames: %d detected\n",
1201 cm->cm_extra_frames);
1202 }
1203 }
1204
1205 if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
1206 device_printf(sc->mfi_dev,
1207 "APJ ****requeue command %p index=%d\n",
1208 cm, cm->cm_index);
1209 mfi_requeue_ready(cm);
1210 } else
1211 mfi_release_command(cm);
1212 }
1213 mfi_startio(sc);
1214 }
1215
1216 static void
1217 mfi_kill_hba(struct mfi_softc *sc)
1218 {
1219 if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1220 MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
1221 else
1222 MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
1223 }
1224
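/*
 * Two-stage firmware reset handler.  Stage one (adpreset == 1) resets the
 * adapter, waits for the reset to clear, brings the firmware back to the
 * READY state, reinitializes the MFI/MPT queues and then reissues any
 * commands that were outstanding when the fault was detected.
 */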
1225 static void
1226 mfi_process_fw_state_chg_isr(void *arg)
1227 {
1228 struct mfi_softc *sc= (struct mfi_softc *)arg;
1229 int error, status;
1230
1231 if (sc->adpreset == 1) {
1232 device_printf(sc->mfi_dev, "First stage of FW reset "
1233 "initiated...\n");
1234
1235 sc->mfi_adp_reset(sc);
1236 sc->mfi_enable_intr(sc);
1237
1238 device_printf(sc->mfi_dev, "First stage of reset complete, "
1239 "second stage initiated...\n");
1240
1241 sc->adpreset = 2;
1242
1243 /* wait about 20 seconds before starting the second init */
1244 for (int wait = 0; wait < 20000; wait++)
1245 DELAY(1000);
1246 device_printf(sc->mfi_dev, "Second stage of FW reset "
1247 "initiated...\n");
1248 while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1249
1250 sc->mfi_disable_intr(sc);
1251
1252 /* We expect the FW state to be READY */
1253 if (mfi_transition_firmware(sc)) {
1254 device_printf(sc->mfi_dev, "controller is not in "
1255 "ready state\n");
1256 mfi_kill_hba(sc);
1257 sc->hw_crit_error = 1;
1258 return;
1259 }
1260 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
1261 device_printf(sc->mfi_dev, "Failed to initialise MFI "
1262 "queue\n");
1263 mfi_kill_hba(sc);
1264 sc->hw_crit_error = 1;
1265 return;
1266 }
1267
1268 /* Init last reply index and max */
1269 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
1270 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
1271
1272 sc->mfi_enable_intr(sc);
1273 sc->adpreset = 0;
1274 if (sc->mfi_aen_cm != NULL) {
1275 free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1276 mfi_remove_busy(sc->mfi_aen_cm);
1277 mfi_release_command(sc->mfi_aen_cm);
1278 sc->mfi_aen_cm = NULL;
1279 }
1280
1281 if (sc->mfi_map_sync_cm != NULL) {
1282 mfi_remove_busy(sc->mfi_map_sync_cm);
1283 mfi_release_command(sc->mfi_map_sync_cm);
1284 sc->mfi_map_sync_cm = NULL;
1285 }
1286 mfi_issue_pending_cmds_again(sc);
1287
1288 /*
1289 * Issuing the pending commands can result in the adapter being
1290 * marked dead because of too many retries.  Check for that
1291 * condition before clearing the reset condition on the FW
1292 */
1293 if (!sc->hw_crit_error) {
1294 /*
1295 * Initiate AEN (Asynchronous Event Notification) &
1296 * Sync Map
1297 */
1298 mfi_aen_setup(sc, sc->last_seq_num);
1299 mfi_tbolt_sync_map_info(sc);
1300
1301 sc->issuepend_done = 1;
1302 device_printf(sc->mfi_dev, "second stage of reset "
1303 "complete, FW is ready now.\n");
1304 } else {
1305 device_printf(sc->mfi_dev, "second stage of reset "
1306 "never completed, hba was marked offline.\n");
1307 }
1308 } else {
1309 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1310 "called with unhandled value:%d\n", sc->adpreset);
1311 }
1312 }
1313
1314 /*
1315 * The ThunderBolt HW has an option for the driver to directly
1316 * access the underlying disks and operate on the RAID. To
1317 * do this there needs to be a capability to keep the RAID controller
1318 * and driver in sync. The FreeBSD driver does not take advantage
1319 * of this feature since it adds a lot of complexity and slows down
1320 * performance. Performance is gained by using the controller's
1321 * cache etc.
1322 *
1323 * Even though this driver doesn't access the disks directly, an
1324 * AEN like command is used to inform the RAID firmware to "sync"
1325 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
1326 * command in write mode will return when the RAID firmware has
1327 * detected a change to the RAID state. Examples of this type
1328 * of change are removing a disk. Once the command returns then
1329 * the driver needs to acknowledge this and "sync" all LD's again.
1330 * This repeats until we shutdown. Then we need to cancel this
1331 * pending command.
1332 *
1333 * If this is not done right, the RAID firmware will not remove a
1334 * pulled drive and the RAID won't go degraded etc., effectively
1335 * stopping any RAID management functions.
1336 *
1337 * Doing another LD sync requires the use of an event since the
1338 * driver needs to do a mfi_wait_command and can't do that in an
1339 * interrupt thread.
1340 *
1341 * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO,
1342 * but that requires a bunch of structures, and it is simpler to just
1343 * do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
1344 */
1345
1346 void
1347 mfi_tbolt_sync_map_info(struct mfi_softc *sc)
1348 {
1349 int error = 0, i;
1350 struct mfi_command *cmd = NULL;
1351 struct mfi_dcmd_frame *dcmd = NULL;
1352 uint32_t context = 0;
1353 union mfi_ld_ref *ld_sync = NULL;
1354 size_t ld_size;
1355 struct mfi_frame_header *hdr;
1356 struct mfi_command *cm = NULL;
1357 struct mfi_ld_list *list = NULL;
1358
1359 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1360
1361 if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
1362 return;
1363
1364 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1365 (void **)&list, sizeof(*list));
1366 if (error)
1367 goto out;
1368
1369 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
1370
1371 if (mfi_wait_command(sc, cm) != 0) {
1372 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1373 goto out;
1374 }
1375
1376 hdr = &cm->cm_frame->header;
1377 if (hdr->cmd_status != MFI_STAT_OK) {
1378 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1379 hdr->cmd_status);
1380 goto out;
1381 }
1382
1383 ld_size = sizeof(*ld_sync) * list->ld_count;
1384 ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
1385 M_NOWAIT | M_ZERO);
1386 if (ld_sync == NULL) {
1387 device_printf(sc->mfi_dev, "Failed to allocate sync\n");
1388 goto out;
1389 }
1390 for (i = 0; i < list->ld_count; i++)
1391 ld_sync[i].ref = list->ld_list[i].ld.ref;
1392
1393 if ((cmd = mfi_dequeue_free(sc)) == NULL) {
1394 device_printf(sc->mfi_dev, "Failed to get command\n");
1395 free(ld_sync, M_MFIBUF);
1396 goto out;
1397 }
1398
1399 context = cmd->cm_frame->header.context;
1400 bzero(cmd->cm_frame, sizeof(union mfi_frame));
1401 cmd->cm_frame->header.context = context;
1402
1403 dcmd = &cmd->cm_frame->dcmd;
1404 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1405 dcmd->header.cmd = MFI_CMD_DCMD;
1406 dcmd->header.flags = MFI_FRAME_DIR_WRITE;
1407 dcmd->header.timeout = 0;
1408 dcmd->header.data_len = ld_size;
1409 dcmd->header.scsi_status = 0;
1410 dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
1411 cmd->cm_sg = &dcmd->sgl;
1412 cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1413 cmd->cm_data = ld_sync;
1414 cmd->cm_private = ld_sync;
1415
1416 cmd->cm_len = ld_size;
1417 cmd->cm_complete = mfi_sync_map_complete;
1418 sc->mfi_map_sync_cm = cmd;
1419
1420 cmd->cm_flags = MFI_CMD_DATAOUT;
1421 cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
1422 cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
1423
1424 if ((error = mfi_mapcmd(sc, cmd)) != 0) {
1425 device_printf(sc->mfi_dev, "failed to send map sync\n");
1426 free(ld_sync, M_MFIBUF);
1427 sc->mfi_map_sync_cm = NULL;
1428 mfi_release_command(cmd);
1429 goto out;
1430 }
1431
1432 out:
1433 if (list)
1434 free(list, M_MFIBUF);
1435 if (cm)
1436 mfi_release_command(cm);
1437 }
1438
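/*
 * Completion handler for the map-sync DCMD: free the LD list buffer and,
 * unless the command was aborted (e.g. during detach), queue another sync
 * so the driver keeps tracking RAID map changes.
 */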
1439 static void
1440 mfi_sync_map_complete(struct mfi_command *cm)
1441 {
1442 struct mfi_frame_header *hdr;
1443 struct mfi_softc *sc;
1444 int aborted = 0;
1445
1446 sc = cm->cm_sc;
1447 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1448
1449 hdr = &cm->cm_frame->header;
1450
1451 if (sc->mfi_map_sync_cm == NULL)
1452 return;
1453
1454 if (sc->cm_map_abort ||
1455 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1456 sc->cm_map_abort = 0;
1457 aborted = 1;
1458 }
1459
1460 free(cm->cm_data, M_MFIBUF);
1461 wakeup(&sc->mfi_map_sync_cm);
1462 sc->mfi_map_sync_cm = NULL;
1463 mfi_release_command(cm);
1464
1465 /* set it up again so the driver can catch more events */
1466 if (!aborted)
1467 mfi_queue_map_sync(sc);
1468 }
1469
1470 static void
1471 mfi_queue_map_sync(struct mfi_softc *sc)
1472 {
1473 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1474 taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
1475 }
1476
1477 void
1478 mfi_handle_map_sync(void *context, int pending)
1479 {
1480 struct mfi_softc *sc;
1481
1482 sc = context;
1483 mtx_lock(&sc->mfi_io_lock);
1484 mfi_tbolt_sync_map_info(sc);
1485 mtx_unlock(&sc->mfi_io_lock);
1486 }