/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *            Copyright 1994-2009 The FreeBSD Project.
 *            All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/dev/mfi/mfi_tbolt.c 243826 2012-12-03 18:47:25Z delphij $");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>

struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t mfi_build_mpt_pass_thru(struct mfi_softc *sc,
    struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)

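/*
 * mfi_tbolt_enable_intr_ppc - enable controller interrupts by unmasking
 * the Thunderbolt doorbell bit in the outbound interrupt mask register;
 * the read-back flushes the posted write.
 */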
void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
        MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
        MFI_READ4(sc, MFI_OMSK);
}

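/*
 * mfi_tbolt_disable_intr_ppc - mask all interrupt sources in the outbound
 * interrupt mask register; the read-back flushes the posted write.
 */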
void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
        MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
        MFI_READ4(sc, MFI_OMSK);
}

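/*
 * mfi_tbolt_read_fw_status_ppc - read the firmware status word from
 * outbound scratch pad register 0.
 */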
int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
        return MFI_READ4(sc, MFI_OSP0);
}

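/*
 * mfi_tbolt_check_clear_intr_ppc - check and acknowledge the outbound
 * interrupt status.  Returns 1 if the interrupt is not ours,
 * MFI_FIRMWARE_STATE_CHANGE if the firmware signalled a state change,
 * and 0 for a normal completion interrupt.
 */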
int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
        int32_t status, mfi_status = 0;

        status = MFI_READ4(sc, MFI_OSTS);

        if (status & 1) {
                MFI_WRITE4(sc, MFI_OSTS, status);
                MFI_READ4(sc, MFI_OSTS);
                if (status & MFI_STATE_CHANGE_INTERRUPT)
                        mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
                return mfi_status;
        }
        if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
                return 1;

        MFI_READ4(sc, MFI_OSTS);
        return 0;
}

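/*
 * mfi_tbolt_issue_cmd_ppc - post a command to the controller by writing
 * the low and high halves of its descriptor, tagged as a legacy MFA
 * frame address, to the inbound queue port registers.
 */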
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
        bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
            << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
        MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @sc: adapter soft state
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
        int retry = 0, i = 0, j = 0;
        int HostDiag;

        /* Write the magic sequence that unlocks the diagnostic register. */
        MFI_WRITE4(sc, MFI_WSR, 0xF);
        MFI_WRITE4(sc, MFI_WSR, 4);
        MFI_WRITE4(sc, MFI_WSR, 0xB);
        MFI_WRITE4(sc, MFI_WSR, 2);
        MFI_WRITE4(sc, MFI_WSR, 7);
        MFI_WRITE4(sc, MFI_WSR, 0xD);

        /* Crude spin delay before the first read. */
        for (i = 0; i < 10000; i++)
                ;

        HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

        while (!(HostDiag & DIAG_WRITE_ENABLE)) {
                for (i = 0; i < 1000; i++)
                        ;
                HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
                device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
                    "hostdiag=%x\n", retry, HostDiag);

                if (retry++ >= 100)
                        return 1;
        }

        device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);

        MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

        /*
         * Spin delay; a separate inner index is needed, otherwise the
         * inner loop clobbers the outer counter and the outer loop runs
         * only once.
         */
        for (i = 0; i < 10; i++) {
                for (j = 0; j < 10000; j++)
                        ;
        }

        HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
        while (HostDiag & DIAG_RESET_ADAPTER) {
                for (i = 0; i < 1000; i++)
                        ;
                HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
                device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
                    "hostdiag=%x\n", retry, HostDiag);

                if (retry++ >= 1000)
                        return 1;
        }
        return 0;
}

/*
 * This routine initializes Thunderbolt-specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
        /* Initialize single reply size and message size */
        sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
        sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

        /*
         * Calculate how many SGEs are allowed in an allocated main message:
         * (size of the message - RAID SCSI IO message size (except SGE))
         * / size of SGE
         * (0x100 - (0x90 - 0x10)) / 0x10 = 8
         */
        sc->max_SGEs_in_main_message =
            (uint8_t)((sc->raid_io_msg_size
            - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
            - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
        /*
         * (Command frame size allocated in SRB ext - RAID SCSI IO message
         * size) / size of SGL;
         * (1280 - 256) / 16 = 64
         */
        sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
            - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
        /*
         * (0x08 - 1) + (0x40 - 1) = 0x46; one SGE in the main message is
         * used for the chain element and one is left for command
         * coalescing.
         */
        sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
            + sc->max_SGEs_in_chain_message - 1;
        /*
         * This is the offset in number of 4 * 32-bit words to the next
         * chain: (0x100 - 0x10) / 0x10 = 0xF (15)
         */
        sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
            - sizeof(MPI2_SGE_IO_UNION)) / 16;
        sc->chain_offset_value_for_mpt_ptmsg
            = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
        sc->mfi_cmd_pool_tbolt = NULL;
        sc->request_desc_pool = NULL;
}

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */
uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
        uint32_t size;
        size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;      /* for alignment */
        size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
        size += sc->reply_size * sc->mfi_max_fw_cmds;
        /* this is for SGLs */
        size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
        return size;
}

/*
 * Description:
 *	This function will prepare message pools for the Thunderbolt
 *	controller.
 * Arguments:
 *	DevExt - HBA miniport driver's adapter data storage structure
 *	pMemLocation - start of the memory allocated for Thunderbolt
 * Return Value:
 *	TRUE if successful
 *	FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
        uint32_t offset = 0;
        uint8_t *addr = mem_location;

        /* Request Descriptor Base physical Address */

        /* For Request Descriptors Virtual Memory */
        /* Initialize the aligned IO Frames Virtual Memory Pointer */
        if (((uintptr_t)addr) & (0xFF)) {
                addr = &addr[sc->raid_io_msg_size];
                addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
                sc->request_message_pool_align = addr;
        } else
                sc->request_message_pool_align = addr;

        offset = sc->request_message_pool_align - sc->request_message_pool;
        sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

        /* DJA XXX should this be bus dma ??? */
        /* Skip request message pool */
        addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
        /* Reply Frame Pool is initialized */
        sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
        if (((uintptr_t)addr) & (0xFF)) {
                addr = &addr[sc->reply_size];
                addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
        }
        sc->reply_frame_pool_align
            = (struct mfi_mpi2_reply_header *)addr;

        offset = (uintptr_t)sc->reply_frame_pool_align
            - (uintptr_t)sc->request_message_pool;
        sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

        /* Skip Reply Frame Pool */
        addr += sc->reply_size * sc->mfi_max_fw_cmds;
        sc->reply_pool_limit = addr;

        /* Initialize the reply frame area to 0xFF */
        memset((uint8_t *)sc->reply_frame_pool, 0xFF,
            (sc->reply_size * sc->mfi_max_fw_cmds));

        offset = sc->reply_size * sc->mfi_max_fw_cmds;
        sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
        /* initialize the last_reply_idx to 0 */
        sc->last_reply_idx = 0;
        offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
            sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
        if (offset > tbolt_contg_length)
                device_printf(sc->mfi_dev, "Error: initialized more than "
                    "allocated\n");
        return 0;
}

/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
        struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
        struct mfi_init_frame *mfi_init;
        uintptr_t offset = 0;
        bus_addr_t phyAddress;
        MFI_ADDRESS *mfiAddressTemp;
        struct mfi_command *cm;
        int error;

        mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
        /* Check if initialization is already completed */
        if (sc->MFA_enabled) {
                return 1;
        }

        mtx_lock(&sc->mfi_io_lock);
        if ((cm = mfi_dequeue_free(sc)) == NULL) {
                mtx_unlock(&sc->mfi_io_lock);
                return (EBUSY);
        }
        cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
        cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
        cm->cm_dmamap = sc->mfi_tb_init_dmamap;
        cm->cm_frame->header.context = 0;
        cm->cm_sc = sc;
        cm->cm_index = 0;

        /*
         * Abuse the SG list area of the frame to hold the init_qinfo
         * object.
         */
        mfi_init = &cm->cm_frame->init;

        bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
        mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
        mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

        /* set MsgVersion and HeaderVersion host driver was built with */
        mpi2IocInit->MsgVersion = MPI2_VERSION;
        mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
        mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
        mpi2IocInit->ReplyDescriptorPostQueueDepth
            = (uint16_t)sc->mfi_max_fw_cmds;
        mpi2IocInit->ReplyFreeQueueDepth = 0;   /* Not supported by MR. */

        /* Get physical address of reply frame pool */
        offset = (uintptr_t)sc->reply_frame_pool_align
            - (uintptr_t)sc->request_message_pool;
        phyAddress = sc->mfi_tb_busaddr + offset;
        mfiAddressTemp =
            (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
        mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
        mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

        /* Get physical address of request message pool */
        offset = sc->request_message_pool_align - sc->request_message_pool;
        phyAddress = sc->mfi_tb_busaddr + offset;
        mfiAddressTemp =
            (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
        mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
        mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
        mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
        mpi2IocInit->TimeStamp = time_uptime;

        if (sc->verbuf) {
                snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2,
                    "%s\n", MEGASAS_VERSION);
                mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
                mfi_init->driver_ver_hi =
                    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
        }
        /* Get the physical address of the mpi2 ioc init command */
        phyAddress = sc->mfi_tb_ioc_init_busaddr;
        mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
        mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
        mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

        mfi_init->header.cmd = MFI_CMD_INIT;
        mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
        mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

        cm->cm_data = NULL;
        cm->cm_flags |= MFI_CMD_POLLED;
        cm->cm_timestamp = time_uptime;
        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "failed to send IOC init2 "
                    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
                mfi_release_command(cm);
                mtx_unlock(&sc->mfi_io_lock);
                return (error);
        }
        mfi_release_command(cm);
        mtx_unlock(&sc->mfi_io_lock);

        if (mfi_init->header.cmd_status == 0) {
                sc->MFA_enabled = 1;
        } else {
                device_printf(sc->mfi_dev, "Init command failed %x\n",
                    mfi_init->header.cmd_status);
                return 1;
        }

        return 0;
}

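/*
 * mfi_tbolt_alloc_cmd - allocate the request descriptor pool and the
 * per-command Thunderbolt tracking structures, then carve each command's
 * IO request frame and chain frame out of the contiguous DMA region set
 * up by mfi_tbolt_init_desc_pool.  SMID 0 is reserved, so command indices
 * start at 1.
 */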
int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
        struct mfi_cmd_tbolt *cmd;
        bus_addr_t io_req_base_phys;
        uint8_t *io_req_base;
        int i = 0, j = 0, offset = 0;

        /*
         * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt
         * pointers.  Allocate the dynamic array first and then allocate
         * individual commands.
         */
        sc->request_desc_pool = malloc(sizeof(
            union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
            M_MFIBUF, M_NOWAIT|M_ZERO);
        if (!sc->request_desc_pool) {
                device_printf(sc->mfi_dev, "out of memory. Could not alloc "
                    "memory for request_desc_pool\n");
                return 1;
        }
        sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
            * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

        if (!sc->mfi_cmd_pool_tbolt) {
                device_printf(sc->mfi_dev, "out of memory. Could not alloc "
                    "memory for cmd_list_fusion\n");
                return 1;
        }

        for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
                sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
                    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

                if (!sc->mfi_cmd_pool_tbolt[i]) {
                        device_printf(sc->mfi_dev, "Could not alloc cmd list "
                            "fusion\n");

                        for (j = 0; j < i; j++)
                                free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

                        free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
                        sc->mfi_cmd_pool_tbolt = NULL;
                        /* Bail out rather than dereference the freed pool
                         * in the loop below. */
                        return 1;
                }
        }

        /*
         * The first 256 bytes (SMID 0) are not used.  Don't add them to
         * the command list.
         */
        io_req_base = sc->request_message_pool_align
            + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
        io_req_base_phys = sc->request_msg_busaddr
            + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

        /*
         * Add all the commands to the command pool (instance->cmd_pool).
         * SMID 0 is reserved, so set SMID/index starting from 1.
         */
        for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
                cmd = sc->mfi_cmd_pool_tbolt[i];
                offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
                cmd->index = i + 1;
                cmd->request_desc = (union mfi_mpi2_request_descriptor *)
                    (sc->request_desc_pool + i);
                cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
                    (io_req_base + offset);
                cmd->io_request_phys_addr = io_req_base_phys + offset;
                cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
                    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
                cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
                    * MEGASAS_MAX_SZ_CHAIN_FRAME;
                cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

                TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
        }
        return 0;
}

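/*
 * mfi_tbolt_reset - initiate an adapter reset if the firmware reports a
 * fault.  Returns 0 when a reset is pending or has been started and 1
 * when no reset is possible.
 */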
int
mfi_tbolt_reset(struct mfi_softc *sc)
{
        uint32_t fw_state;

        mtx_lock(&sc->mfi_io_lock);
        if (sc->hw_crit_error) {
                device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
                mtx_unlock(&sc->mfi_io_lock);
                return 1;
        }

        if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
                fw_state = sc->mfi_read_fw_status(sc);
                if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
                        if ((sc->disableOnlineCtrlReset == 0)
                            && (sc->adpreset == 0)) {
                                device_printf(sc->mfi_dev, "Adapter RESET "
                                    "condition is detected\n");
                                sc->adpreset = 1;
                                sc->issuepend_done = 0;
                                sc->MFA_enabled = 0;
                                sc->last_reply_idx = 0;
                                mfi_process_fw_state_chg_isr((void *) sc);
                        }
                        mtx_unlock(&sc->mfi_io_lock);
                        return 0;
                }
        }
        mtx_unlock(&sc->mfi_io_lock);
        return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
        struct mfi_softc *sc = (struct mfi_softc *)arg;

        if (sc->mfi_check_clear_intr(sc) == 1)
                return;
        if (sc->mfi_detaching)
                return;
        mtx_lock(&sc->mfi_io_lock);
        mfi_tbolt_complete_cmd(sc);
        if (sc->mfi_flags & MFI_FLAGS_QFRZN)
                sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
        mfi_startio(sc);
        mtx_unlock(&sc->mfi_io_lock);
        return;
}

/*
 * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
 * @cmd : Pointer to cmd
 * @status : status of cmd returned by FW
 * @ext_status : ext status of cmd returned by FW
 */
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
        switch (status) {
        case MFI_STAT_OK:
                mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
                mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
                mfi_cmd->cm_error = MFI_STAT_OK;
                break;

        case MFI_STAT_SCSI_IO_FAILED:
        case MFI_STAT_LD_INIT_IN_PROGRESS:
                mfi_cmd->cm_frame->header.cmd_status = status;
                mfi_cmd->cm_frame->header.scsi_status = ext_status;
                mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
                mfi_cmd->cm_frame->dcmd.header.scsi_status
                    = ext_status;
                break;

        case MFI_STAT_SCSI_DONE_WITH_ERROR:
                mfi_cmd->cm_frame->header.cmd_status = ext_status;
                mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
                break;

        case MFI_STAT_LD_OFFLINE:
        case MFI_STAT_DEVICE_NOT_FOUND:
                mfi_cmd->cm_frame->header.cmd_status = status;
                mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
                break;

        default:
                mfi_cmd->cm_frame->header.cmd_status = status;
                mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
                break;
        }
}

/*
 * mfi_tbolt_return_cmd - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
{
        mtx_assert(&sc->mfi_io_lock, MA_OWNED);

        cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
        TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
}

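/*
 * mfi_tbolt_complete_cmd - drain the reply descriptor queue.  Walks the
 * reply ring starting at last_reply_idx, maps each completion back to its
 * MFI command via the SMID, updates the command status, returns the
 * Thunderbolt command to the free pool, and posts the new reply index
 * back to the firmware.
 */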
void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
        struct mfi_mpi2_reply_header *desc, *reply_desc;
        struct mfi_command *cmd_mfi, *cmd_mfi_check;    /* For MFA Cmds */
        struct mfi_cmd_tbolt *cmd_tbolt;
        uint16_t smid;
        uint8_t reply_descript_type;
        struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
        uint32_t status, extStatus;
        uint16_t num_completed;
        union desc_value val;

        desc = (struct mfi_mpi2_reply_header *)
            ((uintptr_t)sc->reply_frame_pool_align
            + sc->last_reply_idx * sc->reply_size);
        reply_desc = desc;

        if (!reply_desc)
                device_printf(sc->mfi_dev, "reply desc is NULL!!\n");

        reply_descript_type = reply_desc->ReplyFlags
            & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                return;

        num_completed = 0;
        val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

        /* Read Reply descriptor */
        while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
                smid = reply_desc->SMID;
                /* Valid SMIDs run from 1 to mfi_max_fw_cmds. */
                if (!smid || smid > sc->mfi_max_fw_cmds) {
                        device_printf(sc->mfi_dev, "smid is %x. Cannot "
                            "proceed. Returning\n", smid);
                        return;
                }

                cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
                cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
                scsi_io_req = cmd_tbolt->io_request;

                status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
                extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
                map_tbolt_cmd_status(cmd_mfi, status, extStatus);

                if (cmd_mfi->cm_flags & MFI_CMD_SCSI &&
                    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
                        /* polled LD/SYSPD IO command */
                        mfi_tbolt_return_cmd(sc, cmd_tbolt);
                        /* XXX mark okay for now DJA */
                        cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
                } else {
                        /* remove command from busy queue if not polled */
                        TAILQ_FOREACH(cmd_mfi_check, &sc->mfi_busy, cm_link) {
                                if (cmd_mfi_check == cmd_mfi) {
                                        mfi_remove_busy(cmd_mfi);
                                        break;
                                }
                        }

                        /* complete the command */
                        mfi_complete(sc, cmd_mfi);
                        mfi_tbolt_return_cmd(sc, cmd_tbolt);
                }

                sc->last_reply_idx++;
                if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
                        MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
                        sc->last_reply_idx = 0;
                }
                /* Set the descriptor back to all 0xFF. */
                ((union mfi_mpi2_reply_descriptor *)desc)->words =
                    ~((uint64_t)0x00);

                num_completed++;

                /* Get the next reply descriptor */
                desc = (struct mfi_mpi2_reply_header *)
                    ((uintptr_t)sc->reply_frame_pool_align
                    + sc->last_reply_idx * sc->reply_size);
                reply_desc = desc;
                val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
                reply_descript_type = reply_desc->ReplyFlags
                    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                        break;
        }

        if (!num_completed)
                return;

        /* update replyIndex to FW */
        if (sc->last_reply_idx)
                MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

        return;
}

/*
 * mfi_tbolt_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool, or NULL if the pool is empty
 */
struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc)
{
        struct mfi_cmd_tbolt *cmd = NULL;

        mtx_assert(&sc->mfi_io_lock, MA_OWNED);

        cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
        if (cmd == NULL)
                return (NULL);
        TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
        memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
        memset((uint8_t *)cmd->io_request, 0,
            MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
        return cmd;
}

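/*
 * mfi_tbolt_get_request_descriptor - return a zeroed request descriptor
 * for the given index (SMID - 1), or NULL if the index is out of range.
 */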
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
        uint8_t *p;

        if (index >= sc->mfi_max_fw_cmds) {
                device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
                    "for descriptor\n", index);
                return NULL;
        }
        p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
            * index;
        memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
        return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
        MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
        struct mfi_mpi2_request_raid_scsi_io *io_req;
        struct mfi_cmd_tbolt *cmd;

        cmd = mfi_tbolt_get_cmd(sc);
        if (!cmd)
                return EBUSY;
        mfi_cmd->cm_extra_frames = cmd->index;  /* Frame count used as SMID */
        cmd->sync_cmd_idx = mfi_cmd->cm_index;
        io_req = cmd->io_request;
        mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

        io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
        io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
            SGL) / 4;
        io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

        mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

        /*
         * In MFI pass thru, nextChainOffset will always be zero to
         * indicate the end of the chain.
         */
        mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
            | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

        /* setting the length to the maximum length */
        mpi25_ieee_chain->Length = 1024;

        return 0;
}

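/*
 * mfi_tbolt_build_ldio - translate an MFI LD read/write frame into an
 * MPI2 LD IO request: copy the target ID, LBA and block count into the
 * RAID context and tag the request descriptor as LD IO.
 */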
void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
        uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
        struct mfi_mpi2_request_raid_scsi_io *io_request;
        struct IO_REQUEST_INFO io_info;

        device_id = mfi_cmd->cm_frame->io.header.target_id;
        io_request = cmd->io_request;
        io_request->RaidContext.TargetID = device_id;
        io_request->RaidContext.Status = 0;
        io_request->RaidContext.exStatus = 0;

        start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
        start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

        memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
        io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
        io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
        io_info.ldTgtId = device_id;
        if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
            MFI_FRAME_DIR_READ)
                io_info.isRead = 1;

        io_request->RaidContext.timeoutValue
            = MFI_FUSION_FP_DEFAULT_TIMEOUT;
        io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
        io_request->DevHandle = device_id;
        cmd->request_desc->header.RequestFlags
            = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
            << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        /* A 6-byte CDB with a block count of 0 means 256 blocks. */
        if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
                io_request->RaidContext.RegLockLength = 0x100;
        io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
            * MFI_SECTOR_LEN;
}

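/*
 * mfi_tbolt_build_io - convert an MFI LD read/write command into an MPT
 * RAID SCSI IO request: build the LD IO context, synthesize the CDB, and
 * construct the SGL.  Returns 0 on success and 1 on failure.
 */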
int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
        struct mfi_mpi2_request_raid_scsi_io *io_request;
        uint32_t sge_count;
        uint8_t cdb_len;
        int readop;
        u_int64_t lba;

        io_request = cmd->io_request;
        if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
            || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
                return 1;

        mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

        /* Convert to SCSI command CDB */
        bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
        if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
                readop = 0;
        else
                readop = 1;

        lba = mfi_cmd->cm_frame->io.lba_hi;
        lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
        cdb_len = mfi_build_cdb(readop, 0, lba,
            mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

        /* Just the CDB length, rest of the flags are zero */
        io_request->IoFlags = cdb_len;

        /*
         * Construct SGL
         */
        sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
            (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
        if (sge_count > sc->mfi_max_sge) {
                device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
                    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
                return 1;
        }
        io_request->RaidContext.numSGE = sge_count;
        io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

        if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
                io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
        else
                io_request->Control = MPI2_SCSIIO_CONTROL_READ;

        io_request->SGLOffset0 = offsetof(
            struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

        io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
        io_request->SenseBufferLength = MFI_SENSE_LEN;
        io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
        io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

        return 0;
}

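/*
 * mfi_tbolt_make_sgl - copy the OS scatter/gather list into the request's
 * IEEE SGE array.  Entries that do not fit in the main message spill into
 * the command's chain frame via an IEEE chain element.  Returns the total
 * number of SGEs used.
 */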
static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
        uint8_t i, sg_processed, sg_to_process;
        uint8_t sge_count, sge_idx;
        union mfi_sgl *os_sgl;

        /*
         * Return 0 if there is no data transfer
         */
        if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
                device_printf(sc->mfi_dev, "Buffer empty\n");
                return 0;
        }
        os_sgl = mfi_cmd->cm_sg;
        sge_count = mfi_cmd->cm_frame->header.sg_count;

        if (sge_count > sc->mfi_max_sge) {
                device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d\n",
                    os_sgl, sge_count);
                return sge_count;
        }

        if (sge_count > sc->max_SGEs_in_main_message)
                /* One element is reserved to store the chain info */
                sge_idx = sc->max_SGEs_in_main_message - 1;
        else
                sge_idx = sge_count;

        for (i = 0; i < sge_idx; i++) {
                /*
                 * On 32-bit systems the OS hands us 32-bit SGEs, but the
                 * firmware only accepts 64-bit SGEs, so widen each entry
                 * while copying.
                 */
                if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
                        sgl_ptr->Length = os_sgl->sg_skinny[i].len;
                        sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
                } else {
                        sgl_ptr->Length = os_sgl->sg32[i].len;
                        sgl_ptr->Address = os_sgl->sg32[i].addr;
                }
                sgl_ptr->Flags = 0;
                sgl_ptr++;
                cmd->io_request->ChainOffset = 0;
        }

        sg_processed = i;

        if (sg_processed < sge_count) {
                pMpi25IeeeSgeChain64_t sg_chain;
                sg_to_process = sge_count - sg_processed;
                cmd->io_request->ChainOffset =
                    sc->chain_offset_value_for_main_message;
                sg_chain = sgl_ptr;
                /* Prepare chain element */
                sg_chain->NextChainOffset = 0;
                sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
                    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
                sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
                    (sge_count - sg_processed));
                sg_chain->Address = cmd->sg_frame_phys_addr;
                sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
                for (; i < sge_count; i++) {
                        if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
                                sgl_ptr->Length = os_sgl->sg_skinny[i].len;
                                sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
                        } else {
                                sgl_ptr->Length = os_sgl->sg32[i].len;
                                sgl_ptr->Address = os_sgl->sg32[i].addr;
                        }
                        sgl_ptr->Flags = 0;
                        sgl_ptr++;
                }
        }
        return sge_count;
}

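/*
 * mfi_build_and_issue_cmd - allocate a Thunderbolt command for an LD IO,
 * build the MPT request, and return the matching request descriptor, or
 * NULL if no command is available or the build fails.
 */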
union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
        struct mfi_cmd_tbolt *cmd;
        union mfi_mpi2_request_descriptor *req_desc = NULL;
        uint16_t index;

        cmd = mfi_tbolt_get_cmd(sc);
        if (!cmd)
                return NULL;
        mfi_cmd->cm_extra_frames = cmd->index;
        cmd->sync_cmd_idx = mfi_cmd->cm_index;

        index = cmd->index;
        req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
        if (!req_desc)
                return NULL;
        if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
                return NULL;
        req_desc->header.SMID = index;
        return req_desc;
}

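/*
 * mfi_tbolt_build_mpt_cmd - wrap a generic MFI frame in an MPT pass-thru
 * request and return the request descriptor carrying its SMID, or NULL
 * on failure.
 */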
union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
        union mfi_mpi2_request_descriptor *req_desc = NULL;
        uint16_t index;

        if (mfi_build_mpt_pass_thru(sc, cmd)) {
                device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
                    "cmd\n");
                return NULL;
        }
        /* For fusion the frame_count variable is used for SMID */
        index = cmd->cm_extra_frames;

        req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
        if (!req_desc)
                return NULL;

        bzero(req_desc, sizeof(*req_desc));
        req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
            MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        req_desc->header.SMID = index;
        return req_desc;
}

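/*
 * mfi_tbolt_send_frame - route an MFI command to the firmware.  LD IO is
 * translated to a native MPT request; everything else goes out as an MPT
 * pass-thru frame.  Polled commands are busy-waited here for up to
 * MFI_POLL_TIMEOUT_SECS.
 */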
int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        uint8_t *cdb;
        union mfi_mpi2_request_descriptor *req_desc = NULL;
        int tm = MFI_POLL_TIMEOUT_SECS * 1000;

        hdr = &cm->cm_frame->header;
        cdb = cm->cm_frame->pass.cdb;
        if (sc->adpreset)
                return 1;
        if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
                cm->cm_timestamp = time_uptime;
                mfi_enqueue_busy(cm);
        } else {        /* still get interrupts for it */
                hdr->cmd_status = MFI_STAT_INVALID_STATUS;
                hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
        }

        if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
                /*
                 * Check for inquiry commands coming from the CLI.  Note
                 * that, as written, this test is always true, so every
                 * PD SCSI IO frame takes the MPT pass-thru path.
                 */
                if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
                        if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
                            NULL) {
                                device_printf(sc->mfi_dev, "Mapping from MFI "
                                    "to MPT Failed\n");
                                return 1;
                        }
                } else
                        device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
        } else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
            hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
                cm->cm_flags |= MFI_CMD_SCSI;
                if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
                        device_printf(sc->mfi_dev, "LDIO Failed\n");
                        return 1;
                }
        } else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
                device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
                    "Failed\n");
                return 1;
        }

        if (cm->cm_flags & MFI_CMD_SCSI) {
                /*
                 * LD IO needs to be posted since it doesn't get
                 * acknowledged via a status update so have the
                 * controller reply via mfi_tbolt_complete_cmd.
                 */
                hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
        }

        MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
        MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));

        if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
                return 0;

        if (cm->cm_flags & MFI_CMD_SCSI) {
                /* check reply queue */
                mfi_tbolt_complete_cmd(sc);
        }

        /* This is a polled command, so busy-wait for it to complete. */
        while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
                DELAY(1000);
                tm -= 1;
                if (tm <= 0)
                        break;
                if (cm->cm_flags & MFI_CMD_SCSI) {
                        /* check reply queue */
                        mfi_tbolt_complete_cmd(sc);
                }
        }

        if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
                device_printf(sc->mfi_dev, "Frame %p timed out "
                    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
                return (ETIMEDOUT);
        }
        return 0;
}

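/*
 * mfi_issue_pending_cmds_again - after an adapter reset, walk the busy
 * queue in reverse and requeue every outstanding command.  Commands that
 * have already triggered several resets take the controller down instead.
 */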
static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
        struct mfi_command *cm, *tmp;

        mtx_assert(&sc->mfi_io_lock, MA_OWNED);
        TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
                cm->retry_for_fw_reset++;

                /*
                 * If a command has continuously been tried multiple times
                 * and is causing an FW reset condition, no further
                 * recoveries should be performed on the controller.
                 */
                if (cm->retry_for_fw_reset == 3) {
                        device_printf(sc->mfi_dev, "megaraid_sas: command %d "
                            "was tried multiple times during adapter reset. "
                            "Shutting down the HBA\n", cm->cm_index);
                        mfi_kill_hba(sc);
                        sc->hw_crit_error = 1;
                        return;
                }

                if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
                        struct mfi_cmd_tbolt *cmd;
                        mfi_remove_busy(cm);
                        cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
                            1];
                        mfi_tbolt_return_cmd(sc, cmd);
                        if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
                                if (cm->cm_frame->dcmd.opcode !=
                                    MFI_DCMD_CTRL_EVENT_WAIT) {
                                        device_printf(sc->mfi_dev,
                                            "APJ ****requeue command %d\n",
                                            cm->cm_index);
                                        mfi_requeue_ready(cm);
                                }
                        } else
                                mfi_release_command(cm);
                }
        }
        mfi_startio(sc);
}

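/*
 * mfi_kill_hba - tell the adapter to stop processing by writing
 * MFI_STOP_ADP to the inbound doorbell (offset 0x00 on Thunderbolt,
 * MFI_IDB elsewhere).
 */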
static void
mfi_kill_hba(struct mfi_softc *sc)
{
        if (sc->mfi_flags & MFI_FLAGS_TBOLT)
                MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
        else
                MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}

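/*
 * mfi_process_fw_state_chg_isr - two-stage firmware reset handler.  Stage
 * one resets the adapter and waits for the diagnostic reset to complete;
 * stage two re-initializes the MFI queue, returns the outstanding AEN and
 * map-sync commands to the pools, and reissues any pending commands.
 */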
static void
mfi_process_fw_state_chg_isr(void *arg)
{
        struct mfi_softc *sc = (struct mfi_softc *)arg;
        struct mfi_cmd_tbolt *cmd;
        int error, status;

        if (sc->adpreset == 1) {
                device_printf(sc->mfi_dev, "First stage of FW reset "
                    "initiated...\n");

                sc->mfi_adp_reset(sc);
                sc->mfi_enable_intr(sc);

                device_printf(sc->mfi_dev, "First stage of reset complete, "
                    "second stage initiated...\n");

                sc->adpreset = 2;

                /* wait about 20 seconds before starting the second init */
                for (int wait = 0; wait < 20000; wait++)
                        DELAY(1000);
                device_printf(sc->mfi_dev, "Second stage of FW reset "
                    "initiated...\n");
                /* wait for the reset-adapter bit to clear */
                while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
                        ;

                sc->mfi_disable_intr(sc);

                /* We expect the FW state to be READY */
                if (mfi_transition_firmware(sc)) {
                        device_printf(sc->mfi_dev, "controller is not in "
                            "ready state\n");
                        mfi_kill_hba(sc);
                        sc->hw_crit_error = 1;
                        return;
                }
                if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
                        return;

                mtx_lock(&sc->mfi_io_lock);

                sc->mfi_enable_intr(sc);
                sc->adpreset = 0;
                if (sc->mfi_aen_cm != NULL) {
                        free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
                        mfi_remove_busy(sc->mfi_aen_cm);
                        cmd = sc->mfi_cmd_pool_tbolt[
                            sc->mfi_aen_cm->cm_extra_frames - 1];
                        mfi_tbolt_return_cmd(sc, cmd);
                        mfi_release_command(sc->mfi_aen_cm);
                        sc->mfi_aen_cm = NULL;
                }
                if (sc->mfi_map_sync_cm) {
                        mfi_release_command(sc->mfi_map_sync_cm);
                        sc->mfi_map_sync_cm = NULL;
                }
                mfi_issue_pending_cmds_again(sc);

                /*
                 * Issuing pending commands can result in the adapter being
                 * marked dead because of too many retries.  Check for that
                 * condition before clearing the reset condition on the FW.
                 */
                if (!sc->hw_crit_error) {
                        /*
                         * Initiate AEN (Asynchronous Event Notification)
                         */
                        mfi_aen_setup(sc, sc->last_seq_num);
                        sc->issuepend_done = 1;
                        device_printf(sc->mfi_dev, "second stage of reset "
                            "complete, FW is ready now.\n");
                } else {
                        device_printf(sc->mfi_dev, "second stage of reset "
                            "never completed, hba was marked offline.\n");
                }
                mtx_unlock(&sc->mfi_io_lock);
        } else {
                device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
                    "called with unhandled value:%d\n", sc->adpreset);
        }
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to inform the RAID firmware to "sync"
 * with all LDs via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns then
 * the driver needs to acknowledge this and "sync" all LDs again.
 * This repeats until we shut down.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc.  Effectively,
 * this stops any RAID management functions from working.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a bunch of structures and it is simpler
 * to just do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */

void
mfi_tbolt_sync_map_info(struct mfi_softc *sc)
{
        int error = 0, i;
        struct mfi_command *cmd;
        struct mfi_dcmd_frame *dcmd;
        uint32_t context = 0;
        union mfi_ld_ref *ld_sync;
        size_t ld_size;
        struct mfi_frame_header *hdr;
        struct mfi_command *cm = NULL;
        struct mfi_ld_list *list = NULL;

        if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
                return;

        mtx_lock(&sc->mfi_io_lock);
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
            (void **)&list, sizeof(*list));
        if (error)
                goto out;

        cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
        if (mfi_wait_command(sc, cm) != 0) {
                device_printf(sc->mfi_dev, "Failed to get device listing\n");
                goto out;
        }

        hdr = &cm->cm_frame->header;
        if (hdr->cmd_status != MFI_STAT_OK) {
                device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
                    hdr->cmd_status);
                goto out;
        }

        ld_size = sizeof(*ld_sync) * list->ld_count;
        mtx_unlock(&sc->mfi_io_lock);
        /* An M_WAITOK allocation does not fail, so no NULL check is needed. */
        ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
            M_WAITOK | M_ZERO);
        for (i = 0; i < list->ld_count; i++) {
                ld_sync[i].ref = list->ld_list[i].ld.ref;
        }

        mtx_lock(&sc->mfi_io_lock);
        if ((cmd = mfi_dequeue_free(sc)) == NULL) {
                device_printf(sc->mfi_dev, "Failed to get command\n");
                free(ld_sync, M_MFIBUF);
                goto out;
        }

        context = cmd->cm_frame->header.context;
        bzero(cmd->cm_frame, sizeof(union mfi_frame));
        cmd->cm_frame->header.context = context;

        dcmd = &cmd->cm_frame->dcmd;
        bzero(dcmd->mbox, MFI_MBOX_SIZE);
        dcmd->header.cmd = MFI_CMD_DCMD;
        dcmd->header.flags = MFI_FRAME_DIR_WRITE;
        dcmd->header.timeout = 0;
        dcmd->header.data_len = ld_size;
        dcmd->header.scsi_status = 0;
        dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
        cmd->cm_sg = &dcmd->sgl;
        cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
        cmd->cm_data = ld_sync;
        cmd->cm_private = ld_sync;

        cmd->cm_len = ld_size;
        cmd->cm_complete = mfi_sync_map_complete;
        sc->mfi_map_sync_cm = cmd;

        cmd->cm_flags = MFI_CMD_DATAOUT;
        cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
        cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;

        if ((error = mfi_mapcmd(sc, cmd)) != 0) {
                device_printf(sc->mfi_dev, "failed to send map sync\n");
                free(ld_sync, M_MFIBUF);
                sc->mfi_map_sync_cm = NULL;
                mfi_requeue_ready(cmd);
                goto out;
        }

out:
        if (list)
                free(list, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);
        mtx_unlock(&sc->mfi_io_lock);
}

static void
mfi_sync_map_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_softc *sc;
        int aborted = 0;

        sc = cm->cm_sc;
        mtx_assert(&sc->mfi_io_lock, MA_OWNED);

        hdr = &cm->cm_frame->header;

        if (sc->mfi_map_sync_cm == NULL)
                return;

        if (sc->cm_map_abort ||
            hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
                sc->cm_map_abort = 0;
                aborted = 1;
        }

        free(cm->cm_data, M_MFIBUF);
        sc->mfi_map_sync_cm = NULL;
        wakeup(&sc->mfi_map_sync_cm);
        mfi_release_command(cm);

        /* set it up again so the driver can catch more events */
        if (!aborted) {
                mfi_queue_map_sync(sc);
        }
}

static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
        mtx_assert(&sc->mfi_io_lock, MA_OWNED);
        taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

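/*
 * mfi_handle_map_sync - taskqueue handler that reissues the LD map sync
 * command from a sleepable context.
 */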
void
mfi_handle_map_sync(void *context, int pending)
{
        struct mfi_softc *sc;

        sc = context;
        mfi_tbolt_sync_map_info(sc);
}