FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi.c
1 /*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26 /*-
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD: releng/8.4/sys/dev/mfi/mfi.c 247828 2013-03-05 06:43:54Z delphij $");
55
56 #include "opt_compat.h"
57 #include "opt_mfi.h"
58
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
64 #include <sys/poll.h>
65 #include <sys/selinfo.h>
66 #include <sys/bus.h>
67 #include <sys/conf.h>
68 #include <sys/eventhandler.h>
69 #include <sys/rman.h>
70 #include <sys/bus_dma.h>
71 #include <sys/bio.h>
72 #include <sys/ioccom.h>
73 #include <sys/uio.h>
74 #include <sys/proc.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
78
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
87
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
112 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
113 static void mfi_timeout(void *);
114 static int mfi_user_command(struct mfi_softc *,
115 struct mfi_ioc_passthru *);
116 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
117 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
118 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
120 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
122 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
123 uint32_t frame_cnt);
124 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
125 uint32_t frame_cnt);
126 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
127 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
128 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
129 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
130 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
131
132 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
133 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
134 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
136 0, "event message locale");
137
138 static int mfi_event_class = MFI_EVT_CLASS_INFO;
139 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
141 0, "event message class");
142
143 static int mfi_max_cmds = 128;
144 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
145 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
146 0, "Max commands");
147
148 static int mfi_detect_jbod_change = 1;
149 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
150 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
151 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
152
153 /* Management interface */
154 static d_open_t mfi_open;
155 static d_close_t mfi_close;
156 static d_ioctl_t mfi_ioctl;
157 static d_poll_t mfi_poll;
158
159 static struct cdevsw mfi_cdevsw = {
160 .d_version = D_VERSION,
161 .d_flags = 0,
162 .d_open = mfi_open,
163 .d_close = mfi_close,
164 .d_ioctl = mfi_ioctl,
165 .d_poll = mfi_poll,
166 .d_name = "mfi",
167 };
168
169 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
170
171 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
172 struct mfi_skinny_dma_info mfi_skinny;
173
174 static void
175 mfi_enable_intr_xscale(struct mfi_softc *sc)
176 {
177 MFI_WRITE4(sc, MFI_OMSK, 0x01);
178 }
179
180 static void
181 mfi_enable_intr_ppc(struct mfi_softc *sc)
182 {
183 if (sc->mfi_flags & MFI_FLAGS_1078) {
184 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
185 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
186 }
187 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
188 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
189 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
190 }
191 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
192 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
193 }
194 }
195
196 static int32_t
197 mfi_read_fw_status_xscale(struct mfi_softc *sc)
198 {
199 return MFI_READ4(sc, MFI_OMSG0);
200 }
201
202 static int32_t
203 mfi_read_fw_status_ppc(struct mfi_softc *sc)
204 {
205 return MFI_READ4(sc, MFI_OSP0);
206 }
207
208 static int
209 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
210 {
211 int32_t status;
212
213 status = MFI_READ4(sc, MFI_OSTS);
214 if ((status & MFI_OSTS_INTR_VALID) == 0)
215 return 1;
216
217 MFI_WRITE4(sc, MFI_OSTS, status);
218 return 0;
219 }
220
221 static int
222 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
223 {
224 int32_t status;
225
226 status = MFI_READ4(sc, MFI_OSTS);
227 if (sc->mfi_flags & MFI_FLAGS_1078) {
228 if (!(status & MFI_1078_RM)) {
229 return 1;
230 }
231 }
232 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
233 if (!(status & MFI_GEN2_RM)) {
234 return 1;
235 }
236 }
237 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
238 if (!(status & MFI_SKINNY_RM)) {
239 return 1;
240 }
241 }
242 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
243 MFI_WRITE4(sc, MFI_OSTS, status);
244 else
245 MFI_WRITE4(sc, MFI_ODCR0, status);
246 return 0;
247 }
248
249 static void
250 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 {
252 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
253 }
254
255 static void
256 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 {
258 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
259 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
260 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 } else {
262 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
263 }
264 }
265
266 int
267 mfi_transition_firmware(struct mfi_softc *sc)
268 {
269 uint32_t fw_state, cur_state;
270 int max_wait, i;
271 uint32_t cur_abs_reg_val = 0;
272 uint32_t prev_abs_reg_val = 0;
273
274 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
275 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
276 while (fw_state != MFI_FWSTATE_READY) {
277 if (bootverbose)
278 device_printf(sc->mfi_dev, "Waiting for firmware to "
279 "become ready\n");
280 cur_state = fw_state;
281 switch (fw_state) {
282 case MFI_FWSTATE_FAULT:
283 device_printf(sc->mfi_dev, "Firmware fault\n");
284 return (ENXIO);
285 case MFI_FWSTATE_WAIT_HANDSHAKE:
286 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
287 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 else
289 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
290 max_wait = MFI_RESET_WAIT_TIME;
291 break;
292 case MFI_FWSTATE_OPERATIONAL:
293 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 else
296 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
297 max_wait = MFI_RESET_WAIT_TIME;
298 break;
299 case MFI_FWSTATE_UNDEFINED:
300 case MFI_FWSTATE_BB_INIT:
301 max_wait = MFI_RESET_WAIT_TIME;
302 break;
303 case MFI_FWSTATE_FW_INIT_2:
304 max_wait = MFI_RESET_WAIT_TIME;
305 break;
306 case MFI_FWSTATE_FW_INIT:
307 case MFI_FWSTATE_FLUSH_CACHE:
308 max_wait = MFI_RESET_WAIT_TIME;
309 break;
310 case MFI_FWSTATE_DEVICE_SCAN:
311 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
312 prev_abs_reg_val = cur_abs_reg_val;
313 break;
314 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
315 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
316 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 else
318 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
319 max_wait = MFI_RESET_WAIT_TIME;
320 break;
321 default:
322 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
323 fw_state);
324 return (ENXIO);
325 }
326 for (i = 0; i < (max_wait * 10); i++) {
327 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
328 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
329 if (fw_state == cur_state)
330 DELAY(100000);
331 else
332 break;
333 }
334 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
335 /* Check the device scanning progress */
336 if (prev_abs_reg_val != cur_abs_reg_val) {
337 continue;
338 }
339 }
340 if (fw_state == cur_state) {
341 device_printf(sc->mfi_dev, "Firmware stuck in state "
342 "%#x\n", fw_state);
343 return (ENXIO);
344 }
345 }
346 return (0);
347 }
348
349 static void
350 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
351 {
352 bus_addr_t *addr;
353
354 addr = arg;
355 *addr = segs[0].ds_addr;
356 }
357
358
359 int
360 mfi_attach(struct mfi_softc *sc)
361 {
362 uint32_t status;
363 int error, commsz, framessz, sensesz;
364 int frames, unit, max_fw_sge;
365 uint32_t tb_mem_size = 0;
366
367 if (sc == NULL)
368 return EINVAL;
369
370 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
371 MEGASAS_VERSION);
372
373 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
374 sx_init(&sc->mfi_config_lock, "MFI config");
375 TAILQ_INIT(&sc->mfi_ld_tqh);
376 TAILQ_INIT(&sc->mfi_syspd_tqh);
377 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
378 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
379 TAILQ_INIT(&sc->mfi_evt_queue);
380 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
381 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
382 TAILQ_INIT(&sc->mfi_aen_pids);
383 TAILQ_INIT(&sc->mfi_cam_ccbq);
384
385 mfi_initq_free(sc);
386 mfi_initq_ready(sc);
387 mfi_initq_busy(sc);
388 mfi_initq_bio(sc);
389
390 sc->adpreset = 0;
391 sc->last_seq_num = 0;
392 sc->disableOnlineCtrlReset = 1;
393 sc->issuepend_done = 1;
394 sc->hw_crit_error = 0;
395
396 if (sc->mfi_flags & MFI_FLAGS_1064R) {
397 sc->mfi_enable_intr = mfi_enable_intr_xscale;
398 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
399 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
400 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
401 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
402 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
403 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
404 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
405 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
406 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
407 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
408 sc->mfi_tbolt = 1;
409 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
410 } else {
411 sc->mfi_enable_intr = mfi_enable_intr_ppc;
412 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
413 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
414 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
415 }
416
417
418 /* Before we get too far, see if the firmware is working */
419 if ((error = mfi_transition_firmware(sc)) != 0) {
420 device_printf(sc->mfi_dev, "Firmware not in READY state, "
421 "error %d\n", error);
422 return (ENXIO);
423 }
424
425 /* Start: LSIP200113393 */
426 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
427 1, 0, /* algnmnt, boundary */
428 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
429 BUS_SPACE_MAXADDR, /* highaddr */
430 NULL, NULL, /* filter, filterarg */
431 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
432 1, /* msegments */
433 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
434 0, /* flags */
435 NULL, NULL, /* lockfunc, lockarg */
436 &sc->verbuf_h_dmat)) {
437 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
438 return (ENOMEM);
439 }
440 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
441 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
442 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
443 return (ENOMEM);
444 }
445 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
446 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
447 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
448 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
449 /* End: LSIP200113393 */
450
451 /*
452 * Get information needed for sizing the contiguous memory for the
453 * frame pool. Size down the sgl parameter since we know that
454 * we will never need more than what's required for MAXPHYS.
455 * It would be nice if these constants were available at runtime
456 * instead of compile time.
457 */
458 status = sc->mfi_read_fw_status(sc);
459 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
460 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
461 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
462
463 /* ThunderBolt Support get the contiguous memory */
464
465 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
466 mfi_tbolt_init_globals(sc);
467 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
468 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
469 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
470
471 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
472 1, 0, /* algnmnt, boundary */
473 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
474 BUS_SPACE_MAXADDR, /* highaddr */
475 NULL, NULL, /* filter, filterarg */
476 tb_mem_size, /* maxsize */
477 1, /* msegments */
478 tb_mem_size, /* maxsegsize */
479 0, /* flags */
480 NULL, NULL, /* lockfunc, lockarg */
481 &sc->mfi_tb_dmat)) {
482 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
483 return (ENOMEM);
484 }
485 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
486 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
487 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
488 return (ENOMEM);
489 }
490 bzero(sc->request_message_pool, tb_mem_size);
491 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
492 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
493
494 /* For ThunderBolt memory init */
495 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
496 0x100, 0, /* alignmnt, boundary */
497 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
498 BUS_SPACE_MAXADDR, /* highaddr */
499 NULL, NULL, /* filter, filterarg */
500 MFI_FRAME_SIZE, /* maxsize */
501 1, /* msegments */
502 MFI_FRAME_SIZE, /* maxsegsize */
503 0, /* flags */
504 NULL, NULL, /* lockfunc, lockarg */
505 &sc->mfi_tb_init_dmat)) {
506 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
507 return (ENOMEM);
508 }
509 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
510 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
511 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
512 return (ENOMEM);
513 }
514 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
515 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
516 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
517 &sc->mfi_tb_init_busaddr, 0);
518 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
519 tb_mem_size)) {
520 device_printf(sc->mfi_dev,
521 "Thunderbolt pool preparation error\n");
522 return 0;
523 }
524
525 /*
526 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
527 we are taking it diffrent from what we have allocated for Request
528 and reply descriptors to avoid confusion later
529 */
530 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
531 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
532 1, 0, /* algnmnt, boundary */
533 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
534 BUS_SPACE_MAXADDR, /* highaddr */
535 NULL, NULL, /* filter, filterarg */
536 tb_mem_size, /* maxsize */
537 1, /* msegments */
538 tb_mem_size, /* maxsegsize */
539 0, /* flags */
540 NULL, NULL, /* lockfunc, lockarg */
541 &sc->mfi_tb_ioc_init_dmat)) {
542 device_printf(sc->mfi_dev,
543 "Cannot allocate comms DMA tag\n");
544 return (ENOMEM);
545 }
546 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
547 (void **)&sc->mfi_tb_ioc_init_desc,
548 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
549 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
550 return (ENOMEM);
551 }
552 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
553 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
554 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
555 &sc->mfi_tb_ioc_init_busaddr, 0);
556 }
557 /*
558 * Create the dma tag for data buffers. Used both for block I/O
559 * and for various internal data queries.
560 */
561 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
562 1, 0, /* algnmnt, boundary */
563 BUS_SPACE_MAXADDR, /* lowaddr */
564 BUS_SPACE_MAXADDR, /* highaddr */
565 NULL, NULL, /* filter, filterarg */
566 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
567 sc->mfi_max_sge, /* nsegments */
568 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
569 BUS_DMA_ALLOCNOW, /* flags */
570 busdma_lock_mutex, /* lockfunc */
571 &sc->mfi_io_lock, /* lockfuncarg */
572 &sc->mfi_buffer_dmat)) {
573 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
574 return (ENOMEM);
575 }
576
577 /*
578 * Allocate DMA memory for the comms queues. Keep it under 4GB for
579 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
580 * entry, so the calculated size here will be will be 1 more than
581 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
582 */
583 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
584 sizeof(struct mfi_hwcomms);
585 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
586 1, 0, /* algnmnt, boundary */
587 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
588 BUS_SPACE_MAXADDR, /* highaddr */
589 NULL, NULL, /* filter, filterarg */
590 commsz, /* maxsize */
591 1, /* msegments */
592 commsz, /* maxsegsize */
593 0, /* flags */
594 NULL, NULL, /* lockfunc, lockarg */
595 &sc->mfi_comms_dmat)) {
596 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
597 return (ENOMEM);
598 }
599 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
600 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
601 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
602 return (ENOMEM);
603 }
604 bzero(sc->mfi_comms, commsz);
605 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
606 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
607 /*
608 * Allocate DMA memory for the command frames. Keep them in the
609 * lower 4GB for efficiency. Calculate the size of the commands at
610 * the same time; each command is one 64 byte frame plus a set of
611 * additional frames for holding sg lists or other data.
612 * The assumption here is that the SG list will start at the second
613 * frame and not use the unused bytes in the first frame. While this
614 * isn't technically correct, it simplifies the calculation and allows
615 * for command frames that might be larger than an mfi_io_frame.
616 */
617 if (sizeof(bus_addr_t) == 8) {
618 sc->mfi_sge_size = sizeof(struct mfi_sg64);
619 sc->mfi_flags |= MFI_FLAGS_SG64;
620 } else {
621 sc->mfi_sge_size = sizeof(struct mfi_sg32);
622 }
623 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
624 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
625 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
626 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
627 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
628 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
629 64, 0, /* algnmnt, boundary */
630 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
631 BUS_SPACE_MAXADDR, /* highaddr */
632 NULL, NULL, /* filter, filterarg */
633 framessz, /* maxsize */
634 1, /* nsegments */
635 framessz, /* maxsegsize */
636 0, /* flags */
637 NULL, NULL, /* lockfunc, lockarg */
638 &sc->mfi_frames_dmat)) {
639 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
640 return (ENOMEM);
641 }
642 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
643 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
644 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
645 return (ENOMEM);
646 }
647 bzero(sc->mfi_frames, framessz);
648 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
649 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
650 /*
651 * Allocate DMA memory for the frame sense data. Keep them in the
652 * lower 4GB for efficiency
653 */
654 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
655 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
656 4, 0, /* algnmnt, boundary */
657 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
658 BUS_SPACE_MAXADDR, /* highaddr */
659 NULL, NULL, /* filter, filterarg */
660 sensesz, /* maxsize */
661 1, /* nsegments */
662 sensesz, /* maxsegsize */
663 0, /* flags */
664 NULL, NULL, /* lockfunc, lockarg */
665 &sc->mfi_sense_dmat)) {
666 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
667 return (ENOMEM);
668 }
669 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
670 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
671 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
672 return (ENOMEM);
673 }
674 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
675 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
676 if ((error = mfi_alloc_commands(sc)) != 0)
677 return (error);
678
679 /* Before moving the FW to operational state, check whether
680 * hostmemory is required by the FW or not
681 */
682
683 /* ThunderBolt MFI_IOC2 INIT */
684 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
685 sc->mfi_disable_intr(sc);
686 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
687 device_printf(sc->mfi_dev,
688 "TB Init has failed with error %d\n",error);
689 return error;
690 }
691
692 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
693 return error;
694 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
695 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
696 &sc->mfi_intr)) {
697 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
698 return (EINVAL);
699 }
700 sc->mfi_intr_ptr = mfi_intr_tbolt;
701 sc->mfi_enable_intr(sc);
702 } else {
703 if ((error = mfi_comms_init(sc)) != 0)
704 return (error);
705
706 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
707 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
708 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
709 return (EINVAL);
710 }
711 sc->mfi_intr_ptr = mfi_intr;
712 sc->mfi_enable_intr(sc);
713 }
714 if ((error = mfi_get_controller_info(sc)) != 0)
715 return (error);
716 sc->disableOnlineCtrlReset = 0;
717
718 /* Register a config hook to probe the bus for arrays */
719 sc->mfi_ich.ich_func = mfi_startup;
720 sc->mfi_ich.ich_arg = sc;
721 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
722 device_printf(sc->mfi_dev, "Cannot establish configuration "
723 "hook\n");
724 return (EINVAL);
725 }
726 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
727 mtx_unlock(&sc->mfi_io_lock);
728 return (error);
729 }
730
731 /*
732 * Register a shutdown handler.
733 */
734 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
735 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
736 device_printf(sc->mfi_dev, "Warning: shutdown event "
737 "registration failed\n");
738 }
739
740 /*
741 * Create the control device for doing management
742 */
743 unit = device_get_unit(sc->mfi_dev);
744 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
745 0640, "mfi%d", unit);
746 if (unit == 0)
747 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
748 if (sc->mfi_cdev != NULL)
749 sc->mfi_cdev->si_drv1 = sc;
750 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
751 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
752 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
753 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
754 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
755 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
756 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
757 &sc->mfi_keep_deleted_volumes, 0,
758 "Don't detach the mfid device for a busy volume that is deleted");
759
760 device_add_child(sc->mfi_dev, "mfip", -1);
761 bus_generic_attach(sc->mfi_dev);
762
763 /* Start the timeout watchdog */
764 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
765 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
766 mfi_timeout, sc);
767
768 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
769 mfi_tbolt_sync_map_info(sc);
770 }
771
772 return (0);
773 }
774
775 static int
776 mfi_alloc_commands(struct mfi_softc *sc)
777 {
778 struct mfi_command *cm;
779 int i, ncmds;
780
781 /*
782 * XXX Should we allocate all the commands up front, or allocate on
783 * demand later like 'aac' does?
784 */
785 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
786 if (bootverbose)
787 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
788 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
789
790 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
791 M_WAITOK | M_ZERO);
792
793 for (i = 0; i < ncmds; i++) {
794 cm = &sc->mfi_commands[i];
795 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
796 sc->mfi_cmd_size * i);
797 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
798 sc->mfi_cmd_size * i;
799 cm->cm_frame->header.context = i;
800 cm->cm_sense = &sc->mfi_sense[i];
801 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
802 cm->cm_sc = sc;
803 cm->cm_index = i;
804 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
805 &cm->cm_dmamap) == 0) {
806 mtx_lock(&sc->mfi_io_lock);
807 mfi_release_command(cm);
808 mtx_unlock(&sc->mfi_io_lock);
809 }
810 else
811 break;
812 sc->mfi_total_cmds++;
813 }
814
815 return (0);
816 }
817
818 void
819 mfi_release_command(struct mfi_command *cm)
820 {
821 struct mfi_frame_header *hdr;
822 uint32_t *hdr_data;
823
824 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
825
826 /*
827 * Zero out the important fields of the frame, but make sure the
828 * context field is preserved. For efficiency, handle the fields
829 * as 32 bit words. Clear out the first S/G entry too for safety.
830 */
831 hdr = &cm->cm_frame->header;
832 if (cm->cm_data != NULL && hdr->sg_count) {
833 cm->cm_sg->sg32[0].len = 0;
834 cm->cm_sg->sg32[0].addr = 0;
835 }
836
837 hdr_data = (uint32_t *)cm->cm_frame;
838 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
839 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
840 hdr_data[4] = 0; /* flags, timeout */
841 hdr_data[5] = 0; /* data_len */
842
843 cm->cm_extra_frames = 0;
844 cm->cm_flags = 0;
845 cm->cm_complete = NULL;
846 cm->cm_private = NULL;
847 cm->cm_data = NULL;
848 cm->cm_sg = 0;
849 cm->cm_total_frame_size = 0;
850 cm->retry_for_fw_reset = 0;
851
852 mfi_enqueue_free(cm);
853 }
854
855 int
856 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
857 uint32_t opcode, void **bufp, size_t bufsize)
858 {
859 struct mfi_command *cm;
860 struct mfi_dcmd_frame *dcmd;
861 void *buf = NULL;
862 uint32_t context = 0;
863
864 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
865
866 cm = mfi_dequeue_free(sc);
867 if (cm == NULL)
868 return (EBUSY);
869
870 /* Zero out the MFI frame */
871 context = cm->cm_frame->header.context;
872 bzero(cm->cm_frame, sizeof(union mfi_frame));
873 cm->cm_frame->header.context = context;
874
875 if ((bufsize > 0) && (bufp != NULL)) {
876 if (*bufp == NULL) {
877 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
878 if (buf == NULL) {
879 mfi_release_command(cm);
880 return (ENOMEM);
881 }
882 *bufp = buf;
883 } else {
884 buf = *bufp;
885 }
886 }
887
888 dcmd = &cm->cm_frame->dcmd;
889 bzero(dcmd->mbox, MFI_MBOX_SIZE);
890 dcmd->header.cmd = MFI_CMD_DCMD;
891 dcmd->header.timeout = 0;
892 dcmd->header.flags = 0;
893 dcmd->header.data_len = bufsize;
894 dcmd->header.scsi_status = 0;
895 dcmd->opcode = opcode;
896 cm->cm_sg = &dcmd->sgl;
897 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
898 cm->cm_flags = 0;
899 cm->cm_data = buf;
900 cm->cm_private = buf;
901 cm->cm_len = bufsize;
902
903 *cmp = cm;
904 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
905 *bufp = buf;
906 return (0);
907 }
908
909 static int
910 mfi_comms_init(struct mfi_softc *sc)
911 {
912 struct mfi_command *cm;
913 struct mfi_init_frame *init;
914 struct mfi_init_qinfo *qinfo;
915 int error;
916 uint32_t context = 0;
917
918 mtx_lock(&sc->mfi_io_lock);
919 if ((cm = mfi_dequeue_free(sc)) == NULL)
920 return (EBUSY);
921
922 /* Zero out the MFI frame */
923 context = cm->cm_frame->header.context;
924 bzero(cm->cm_frame, sizeof(union mfi_frame));
925 cm->cm_frame->header.context = context;
926
927 /*
928 * Abuse the SG list area of the frame to hold the init_qinfo
929 * object;
930 */
931 init = &cm->cm_frame->init;
932 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
933
934 bzero(qinfo, sizeof(struct mfi_init_qinfo));
935 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
936 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
937 offsetof(struct mfi_hwcomms, hw_reply_q);
938 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
939 offsetof(struct mfi_hwcomms, hw_pi);
940 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
941 offsetof(struct mfi_hwcomms, hw_ci);
942
943 init->header.cmd = MFI_CMD_INIT;
944 init->header.data_len = sizeof(struct mfi_init_qinfo);
945 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
946 cm->cm_data = NULL;
947 cm->cm_flags = MFI_CMD_POLLED;
948
949 if ((error = mfi_mapcmd(sc, cm)) != 0) {
950 device_printf(sc->mfi_dev, "failed to send init command\n");
951 mtx_unlock(&sc->mfi_io_lock);
952 return (error);
953 }
954 mfi_release_command(cm);
955 mtx_unlock(&sc->mfi_io_lock);
956
957 return (0);
958 }
959
960 static int
961 mfi_get_controller_info(struct mfi_softc *sc)
962 {
963 struct mfi_command *cm = NULL;
964 struct mfi_ctrl_info *ci = NULL;
965 uint32_t max_sectors_1, max_sectors_2;
966 int error;
967
968 mtx_lock(&sc->mfi_io_lock);
969 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
970 (void **)&ci, sizeof(*ci));
971 if (error)
972 goto out;
973 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
974
975 if ((error = mfi_mapcmd(sc, cm)) != 0) {
976 device_printf(sc->mfi_dev, "Failed to get controller info\n");
977 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
978 MFI_SECTOR_LEN;
979 error = 0;
980 goto out;
981 }
982
983 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
984 BUS_DMASYNC_POSTREAD);
985 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
986
987 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
988 max_sectors_2 = ci->max_request_size;
989 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
990 sc->disableOnlineCtrlReset =
991 ci->properties.OnOffProperties.disableOnlineCtrlReset;
992
993 out:
994 if (ci)
995 free(ci, M_MFIBUF);
996 if (cm)
997 mfi_release_command(cm);
998 mtx_unlock(&sc->mfi_io_lock);
999 return (error);
1000 }
1001
1002 static int
1003 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1004 {
1005 struct mfi_command *cm = NULL;
1006 int error;
1007
1008 mtx_lock(&sc->mfi_io_lock);
1009 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1010 (void **)log_state, sizeof(**log_state));
1011 if (error)
1012 goto out;
1013 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1014
1015 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1016 device_printf(sc->mfi_dev, "Failed to get log state\n");
1017 goto out;
1018 }
1019
1020 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1021 BUS_DMASYNC_POSTREAD);
1022 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1023
1024 out:
1025 if (cm)
1026 mfi_release_command(cm);
1027 mtx_unlock(&sc->mfi_io_lock);
1028
1029 return (error);
1030 }
1031
1032 int
1033 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1034 {
1035 struct mfi_evt_log_state *log_state = NULL;
1036 union mfi_evt class_locale;
1037 int error = 0;
1038 uint32_t seq;
1039
1040 class_locale.members.reserved = 0;
1041 class_locale.members.locale = mfi_event_locale;
1042 class_locale.members.evt_class = mfi_event_class;
1043
1044 if (seq_start == 0) {
1045 error = mfi_get_log_state(sc, &log_state);
1046 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1047 if (error) {
1048 if (log_state)
1049 free(log_state, M_MFIBUF);
1050 return (error);
1051 }
1052
1053 /*
1054 * Walk through any events that fired since the last
1055 * shutdown.
1056 */
1057 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1058 log_state->newest_seq_num);
1059 seq = log_state->newest_seq_num;
1060 } else
1061 seq = seq_start;
1062 mfi_aen_register(sc, seq, class_locale.word);
1063 free(log_state, M_MFIBUF);
1064
1065 return 0;
1066 }
1067
1068 int
1069 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1070 {
1071
1072 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1073 cm->cm_complete = NULL;
1074
1075
1076 /*
1077 * MegaCli can issue a DCMD of 0. In this case do nothing
1078 * and return 0 to it as status
1079 */
1080 if (cm->cm_frame->dcmd.opcode == 0) {
1081 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1082 cm->cm_error = 0;
1083 return (cm->cm_error);
1084 }
1085 mfi_enqueue_ready(cm);
1086 mfi_startio(sc);
1087 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1088 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1089 return (cm->cm_error);
1090 }
1091
1092 void
1093 mfi_free(struct mfi_softc *sc)
1094 {
1095 struct mfi_command *cm;
1096 int i;
1097
1098 callout_drain(&sc->mfi_watchdog_callout);
1099
1100 if (sc->mfi_cdev != NULL)
1101 destroy_dev(sc->mfi_cdev);
1102
1103 if (sc->mfi_total_cmds != 0) {
1104 for (i = 0; i < sc->mfi_total_cmds; i++) {
1105 cm = &sc->mfi_commands[i];
1106 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1107 }
1108 free(sc->mfi_commands, M_MFIBUF);
1109 }
1110
1111 if (sc->mfi_intr)
1112 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1113 if (sc->mfi_irq != NULL)
1114 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1115 sc->mfi_irq);
1116
1117 if (sc->mfi_sense_busaddr != 0)
1118 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1119 if (sc->mfi_sense != NULL)
1120 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1121 sc->mfi_sense_dmamap);
1122 if (sc->mfi_sense_dmat != NULL)
1123 bus_dma_tag_destroy(sc->mfi_sense_dmat);
1124
1125 if (sc->mfi_frames_busaddr != 0)
1126 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1127 if (sc->mfi_frames != NULL)
1128 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1129 sc->mfi_frames_dmamap);
1130 if (sc->mfi_frames_dmat != NULL)
1131 bus_dma_tag_destroy(sc->mfi_frames_dmat);
1132
1133 if (sc->mfi_comms_busaddr != 0)
1134 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1135 if (sc->mfi_comms != NULL)
1136 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1137 sc->mfi_comms_dmamap);
1138 if (sc->mfi_comms_dmat != NULL)
1139 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1140
1141 /* ThunderBolt contiguous memory free here */
1142 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1143 if (sc->mfi_tb_busaddr != 0)
1144 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1145 if (sc->request_message_pool != NULL)
1146 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1147 sc->mfi_tb_dmamap);
1148 if (sc->mfi_tb_dmat != NULL)
1149 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1150
1151 /* Version buffer memory free */
1152 /* Start LSIP200113393 */
1153 if (sc->verbuf_h_busaddr != 0)
1154 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1155 if (sc->verbuf != NULL)
1156 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1157 sc->verbuf_h_dmamap);
1158 if (sc->verbuf_h_dmat != NULL)
1159 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1160
1161 /* End LSIP200113393 */
1162 /* ThunderBolt INIT packet memory Free */
1163 if (sc->mfi_tb_init_busaddr != 0)
1164 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1165 if (sc->mfi_tb_init != NULL)
1166 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1167 sc->mfi_tb_init_dmamap);
1168 if (sc->mfi_tb_init_dmat != NULL)
1169 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1170
1171 /* ThunderBolt IOC Init Desc memory free here */
1172 if (sc->mfi_tb_ioc_init_busaddr != 0)
1173 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1174 sc->mfi_tb_ioc_init_dmamap);
1175 if (sc->mfi_tb_ioc_init_desc != NULL)
1176 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1177 sc->mfi_tb_ioc_init_desc,
1178 sc->mfi_tb_ioc_init_dmamap);
1179 if (sc->mfi_tb_ioc_init_dmat != NULL)
1180 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1181 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1182 if (sc->mfi_cmd_pool_tbolt != NULL) {
1183 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1184 free(sc->mfi_cmd_pool_tbolt[i],
1185 M_MFIBUF);
1186 sc->mfi_cmd_pool_tbolt[i] = NULL;
1187 }
1188 }
1189 }
1190 if (sc->mfi_cmd_pool_tbolt != NULL) {
1191 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1192 sc->mfi_cmd_pool_tbolt = NULL;
1193 }
1194 if (sc->request_desc_pool != NULL) {
1195 free(sc->request_desc_pool, M_MFIBUF);
1196 sc->request_desc_pool = NULL;
1197 }
1198 }
1199 if (sc->mfi_buffer_dmat != NULL)
1200 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1201 if (sc->mfi_parent_dmat != NULL)
1202 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1203
1204 if (mtx_initialized(&sc->mfi_io_lock)) {
1205 mtx_destroy(&sc->mfi_io_lock);
1206 sx_destroy(&sc->mfi_config_lock);
1207 }
1208
1209 return;
1210 }
1211
1212 static void
1213 mfi_startup(void *arg)
1214 {
1215 struct mfi_softc *sc;
1216
1217 sc = (struct mfi_softc *)arg;
1218
1219 config_intrhook_disestablish(&sc->mfi_ich);
1220
1221 sc->mfi_enable_intr(sc);
1222 sx_xlock(&sc->mfi_config_lock);
1223 mtx_lock(&sc->mfi_io_lock);
1224 mfi_ldprobe(sc);
1225 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1226 mfi_syspdprobe(sc);
1227 mtx_unlock(&sc->mfi_io_lock);
1228 sx_xunlock(&sc->mfi_config_lock);
1229 }
1230
1231 static void
1232 mfi_intr(void *arg)
1233 {
1234 struct mfi_softc *sc;
1235 struct mfi_command *cm;
1236 uint32_t pi, ci, context;
1237
1238 sc = (struct mfi_softc *)arg;
1239
1240 if (sc->mfi_check_clear_intr(sc))
1241 return;
1242
1243 restart:
1244 pi = sc->mfi_comms->hw_pi;
1245 ci = sc->mfi_comms->hw_ci;
1246 mtx_lock(&sc->mfi_io_lock);
1247 while (ci != pi) {
1248 context = sc->mfi_comms->hw_reply_q[ci];
1249 if (context < sc->mfi_max_fw_cmds) {
1250 cm = &sc->mfi_commands[context];
1251 mfi_remove_busy(cm);
1252 cm->cm_error = 0;
1253 mfi_complete(sc, cm);
1254 }
1255 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1256 ci = 0;
1257 }
1258 }
1259
1260 sc->mfi_comms->hw_ci = ci;
1261
1262 /* Give defered I/O a chance to run */
1263 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1264 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1265 mfi_startio(sc);
1266 mtx_unlock(&sc->mfi_io_lock);
1267
1268 /*
1269 * Dummy read to flush the bus; this ensures that the indexes are up
1270 * to date. Restart processing if more commands have come it.
1271 */
1272 (void)sc->mfi_read_fw_status(sc);
1273 if (pi != sc->mfi_comms->hw_pi)
1274 goto restart;
1275
1276 return;
1277 }
1278
1279 int
1280 mfi_shutdown(struct mfi_softc *sc)
1281 {
1282 struct mfi_dcmd_frame *dcmd;
1283 struct mfi_command *cm;
1284 int error;
1285
1286
1287 if (sc->mfi_aen_cm)
1288 sc->cm_aen_abort = 1;
1289 if (sc->mfi_aen_cm != NULL)
1290 mfi_abort(sc, &sc->mfi_aen_cm);
1291
1292 if (sc->mfi_map_sync_cm)
1293 sc->cm_map_abort = 1;
1294 if (sc->mfi_map_sync_cm != NULL)
1295 mfi_abort(sc, &sc->mfi_map_sync_cm);
1296
1297 mtx_lock(&sc->mfi_io_lock);
1298 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1299 if (error) {
1300 mtx_unlock(&sc->mfi_io_lock);
1301 return (error);
1302 }
1303
1304 dcmd = &cm->cm_frame->dcmd;
1305 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1306 cm->cm_flags = MFI_CMD_POLLED;
1307 cm->cm_data = NULL;
1308
1309 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1310 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1311 }
1312
1313 mfi_release_command(cm);
1314 mtx_unlock(&sc->mfi_io_lock);
1315 return (error);
1316 }
1317
1318 static void
1319 mfi_syspdprobe(struct mfi_softc *sc)
1320 {
1321 struct mfi_frame_header *hdr;
1322 struct mfi_command *cm = NULL;
1323 struct mfi_pd_list *pdlist = NULL;
1324 struct mfi_system_pd *syspd, *tmp;
1325 struct mfi_system_pending *syspd_pend;
1326 int error, i, found;
1327
1328 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1329 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1330 /* Add SYSTEM PD's */
1331 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1332 (void **)&pdlist, sizeof(*pdlist));
1333 if (error) {
1334 device_printf(sc->mfi_dev,
1335 "Error while forming SYSTEM PD list\n");
1336 goto out;
1337 }
1338
1339 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1340 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1341 cm->cm_frame->dcmd.mbox[1] = 0;
1342 if (mfi_mapcmd(sc, cm) != 0) {
1343 device_printf(sc->mfi_dev,
1344 "Failed to get syspd device listing\n");
1345 goto out;
1346 }
1347 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1348 BUS_DMASYNC_POSTREAD);
1349 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1350 hdr = &cm->cm_frame->header;
1351 if (hdr->cmd_status != MFI_STAT_OK) {
1352 device_printf(sc->mfi_dev,
1353 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1354 goto out;
1355 }
1356 /* Get each PD and add it to the system */
1357 for (i = 0; i < pdlist->count; i++) {
1358 if (pdlist->addr[i].device_id ==
1359 pdlist->addr[i].encl_device_id)
1360 continue;
1361 found = 0;
1362 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1363 if (syspd->pd_id == pdlist->addr[i].device_id)
1364 found = 1;
1365 }
1366 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1367 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1368 found = 1;
1369 }
1370 if (found == 0)
1371 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1372 }
1373 /* Delete SYSPD's whose state has been changed */
1374 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1375 found = 0;
1376 for (i = 0; i < pdlist->count; i++) {
1377 if (syspd->pd_id == pdlist->addr[i].device_id)
1378 found = 1;
1379 }
1380 if (found == 0) {
1381 printf("DELETE\n");
1382 mtx_unlock(&sc->mfi_io_lock);
1383 mtx_lock(&Giant);
1384 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1385 mtx_unlock(&Giant);
1386 mtx_lock(&sc->mfi_io_lock);
1387 }
1388 }
1389 out:
1390 if (pdlist)
1391 free(pdlist, M_MFIBUF);
1392 if (cm)
1393 mfi_release_command(cm);
1394
1395 return;
1396 }
1397
1398 static void
1399 mfi_ldprobe(struct mfi_softc *sc)
1400 {
1401 struct mfi_frame_header *hdr;
1402 struct mfi_command *cm = NULL;
1403 struct mfi_ld_list *list = NULL;
1404 struct mfi_disk *ld;
1405 struct mfi_disk_pending *ld_pend;
1406 int error, i;
1407
1408 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1409 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1410
1411 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1412 (void **)&list, sizeof(*list));
1413 if (error)
1414 goto out;
1415
1416 cm->cm_flags = MFI_CMD_DATAIN;
1417 if (mfi_wait_command(sc, cm) != 0) {
1418 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1419 goto out;
1420 }
1421
1422 hdr = &cm->cm_frame->header;
1423 if (hdr->cmd_status != MFI_STAT_OK) {
1424 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1425 hdr->cmd_status);
1426 goto out;
1427 }
1428
1429 for (i = 0; i < list->ld_count; i++) {
1430 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1431 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1432 goto skip_add;
1433 }
1434 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1435 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1436 goto skip_add;
1437 }
1438 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1439 skip_add:;
1440 }
1441 out:
1442 if (list)
1443 free(list, M_MFIBUF);
1444 if (cm)
1445 mfi_release_command(cm);
1446
1447 return;
1448 }
1449
1450 /*
1451 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1452 * the bits in 24-31 are all set, then it is the number of seconds since
1453 * boot.
1454 */
1455 static const char *
1456 format_timestamp(uint32_t timestamp)
1457 {
1458 static char buffer[32];
1459
1460 if ((timestamp & 0xff000000) == 0xff000000)
1461 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1462 0x00ffffff);
1463 else
1464 snprintf(buffer, sizeof(buffer), "%us", timestamp);
1465 return (buffer);
1466 }
1467
1468 static const char *
1469 format_class(int8_t class)
1470 {
1471 static char buffer[6];
1472
1473 switch (class) {
1474 case MFI_EVT_CLASS_DEBUG:
1475 return ("debug");
1476 case MFI_EVT_CLASS_PROGRESS:
1477 return ("progress");
1478 case MFI_EVT_CLASS_INFO:
1479 return ("info");
1480 case MFI_EVT_CLASS_WARNING:
1481 return ("WARN");
1482 case MFI_EVT_CLASS_CRITICAL:
1483 return ("CRIT");
1484 case MFI_EVT_CLASS_FATAL:
1485 return ("FATAL");
1486 case MFI_EVT_CLASS_DEAD:
1487 return ("DEAD");
1488 default:
1489 snprintf(buffer, sizeof(buffer), "%d", class);
1490 return (buffer);
1491 }
1492 }
1493
1494 static void
1495 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1496 {
1497 struct mfi_system_pd *syspd = NULL;
1498
1499 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1500 format_timestamp(detail->time), detail->evt_class.members.locale,
1501 format_class(detail->evt_class.members.evt_class),
1502 detail->description);
1503
1504 /* Don't act on old AEN's or while shutting down */
1505 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1506 return;
1507
1508 switch (detail->arg_type) {
1509 case MR_EVT_ARGS_NONE:
1510 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1511 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1512 if (mfi_detect_jbod_change) {
1513 /*
1514 * Probe for new SYSPD's and Delete
1515 * invalid SYSPD's
1516 */
1517 sx_xlock(&sc->mfi_config_lock);
1518 mtx_lock(&sc->mfi_io_lock);
1519 mfi_syspdprobe(sc);
1520 mtx_unlock(&sc->mfi_io_lock);
1521 sx_xunlock(&sc->mfi_config_lock);
1522 }
1523 }
1524 break;
1525 case MR_EVT_ARGS_LD_STATE:
1526 /* During load time driver reads all the events starting
1527 * from the one that has been logged after shutdown. Avoid
1528 * these old events.
1529 */
1530 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1531 /* Remove the LD */
1532 struct mfi_disk *ld;
1533 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1534 if (ld->ld_id ==
1535 detail->args.ld_state.ld.target_id)
1536 break;
1537 }
1538 /*
1539 Fix: for kernel panics when SSCD is removed
1540 KASSERT(ld != NULL, ("volume dissappeared"));
1541 */
1542 if (ld != NULL) {
1543 mtx_lock(&Giant);
1544 device_delete_child(sc->mfi_dev, ld->ld_dev);
1545 mtx_unlock(&Giant);
1546 }
1547 }
1548 break;
1549 case MR_EVT_ARGS_PD:
1550 if (detail->code == MR_EVT_PD_REMOVED) {
1551 if (mfi_detect_jbod_change) {
1552 /*
1553 * If the removed device is a SYSPD then
1554 * delete it
1555 */
1556 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1557 pd_link) {
1558 if (syspd->pd_id ==
1559 detail->args.pd.device_id) {
1560 mtx_lock(&Giant);
1561 device_delete_child(
1562 sc->mfi_dev,
1563 syspd->pd_dev);
1564 mtx_unlock(&Giant);
1565 break;
1566 }
1567 }
1568 }
1569 }
1570 if (detail->code == MR_EVT_PD_INSERTED) {
1571 if (mfi_detect_jbod_change) {
1572 /* Probe for new SYSPD's */
1573 sx_xlock(&sc->mfi_config_lock);
1574 mtx_lock(&sc->mfi_io_lock);
1575 mfi_syspdprobe(sc);
1576 mtx_unlock(&sc->mfi_io_lock);
1577 sx_xunlock(&sc->mfi_config_lock);
1578 }
1579 }
1580 break;
1581 }
1582 }
1583
1584 static void
1585 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1586 {
1587 struct mfi_evt_queue_elm *elm;
1588
1589 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1590 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1591 if (elm == NULL)
1592 return;
1593 memcpy(&elm->detail, detail, sizeof(*detail));
1594 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1595 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1596 }
1597
1598 static void
1599 mfi_handle_evt(void *context, int pending)
1600 {
1601 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1602 struct mfi_softc *sc;
1603 struct mfi_evt_queue_elm *elm;
1604
1605 sc = context;
1606 TAILQ_INIT(&queue);
1607 mtx_lock(&sc->mfi_io_lock);
1608 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1609 mtx_unlock(&sc->mfi_io_lock);
1610 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1611 TAILQ_REMOVE(&queue, elm, link);
1612 mfi_decode_evt(sc, &elm->detail);
1613 free(elm, M_MFIBUF);
1614 }
1615 }
1616
1617 static int
1618 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1619 {
1620 struct mfi_command *cm;
1621 struct mfi_dcmd_frame *dcmd;
1622 union mfi_evt current_aen, prior_aen;
1623 struct mfi_evt_detail *ed = NULL;
1624 int error = 0;
1625
1626 current_aen.word = locale;
1627 if (sc->mfi_aen_cm != NULL) {
1628 prior_aen.word =
1629 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1630 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1631 !((prior_aen.members.locale & current_aen.members.locale)
1632 ^current_aen.members.locale)) {
1633 return (0);
1634 } else {
1635 prior_aen.members.locale |= current_aen.members.locale;
1636 if (prior_aen.members.evt_class
1637 < current_aen.members.evt_class)
1638 current_aen.members.evt_class =
1639 prior_aen.members.evt_class;
1640 mfi_abort(sc, &sc->mfi_aen_cm);
1641 }
1642 }
1643
1644 mtx_lock(&sc->mfi_io_lock);
1645 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1646 (void **)&ed, sizeof(*ed));
1647 mtx_unlock(&sc->mfi_io_lock);
1648 if (error) {
1649 goto out;
1650 }
1651
1652 dcmd = &cm->cm_frame->dcmd;
1653 ((uint32_t *)&dcmd->mbox)[0] = seq;
1654 ((uint32_t *)&dcmd->mbox)[1] = locale;
1655 cm->cm_flags = MFI_CMD_DATAIN;
1656 cm->cm_complete = mfi_aen_complete;
1657
1658 sc->last_seq_num = seq;
1659 sc->mfi_aen_cm = cm;
1660
1661 mtx_lock(&sc->mfi_io_lock);
1662 mfi_enqueue_ready(cm);
1663 mfi_startio(sc);
1664 mtx_unlock(&sc->mfi_io_lock);
1665
1666 out:
1667 return (error);
1668 }
1669
1670 static void
1671 mfi_aen_complete(struct mfi_command *cm)
1672 {
1673 struct mfi_frame_header *hdr;
1674 struct mfi_softc *sc;
1675 struct mfi_evt_detail *detail;
1676 struct mfi_aen *mfi_aen_entry, *tmp;
1677 int seq = 0, aborted = 0;
1678
1679 sc = cm->cm_sc;
1680 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1681
1682 hdr = &cm->cm_frame->header;
1683
1684 if (sc->mfi_aen_cm == NULL)
1685 return;
1686
1687 if (sc->cm_aen_abort ||
1688 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1689 sc->cm_aen_abort = 0;
1690 aborted = 1;
1691 } else {
1692 sc->mfi_aen_triggered = 1;
1693 if (sc->mfi_poll_waiting) {
1694 sc->mfi_poll_waiting = 0;
1695 selwakeup(&sc->mfi_select);
1696 }
1697 detail = cm->cm_data;
1698 mfi_queue_evt(sc, detail);
1699 seq = detail->seq + 1;
1700 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1701 tmp) {
1702 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1703 aen_link);
1704 PROC_LOCK(mfi_aen_entry->p);
1705 psignal(mfi_aen_entry->p, SIGIO);
1706 PROC_UNLOCK(mfi_aen_entry->p);
1707 free(mfi_aen_entry, M_MFIBUF);
1708 }
1709 }
1710
1711 free(cm->cm_data, M_MFIBUF);
1712 sc->mfi_aen_cm = NULL;
1713 wakeup(&sc->mfi_aen_cm);
1714 mfi_release_command(cm);
1715
1716 /* set it up again so the driver can catch more events */
1717 if (!aborted) {
1718 mtx_unlock(&sc->mfi_io_lock);
1719 mfi_aen_setup(sc, seq);
1720 mtx_lock(&sc->mfi_io_lock);
1721 }
1722 }
1723
1724 #define MAX_EVENTS 15
1725
1726 static int
1727 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1728 {
1729 struct mfi_command *cm;
1730 struct mfi_dcmd_frame *dcmd;
1731 struct mfi_evt_list *el;
1732 union mfi_evt class_locale;
1733 int error, i, seq, size;
1734
1735 class_locale.members.reserved = 0;
1736 class_locale.members.locale = mfi_event_locale;
1737 class_locale.members.evt_class = mfi_event_class;
1738
1739 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1740 * (MAX_EVENTS - 1);
1741 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1742 if (el == NULL)
1743 return (ENOMEM);
1744
1745 for (seq = start_seq;;) {
1746 mtx_lock(&sc->mfi_io_lock);
1747 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1748 free(el, M_MFIBUF);
1749 mtx_unlock(&sc->mfi_io_lock);
1750 return (EBUSY);
1751 }
1752 mtx_unlock(&sc->mfi_io_lock);
1753
1754 dcmd = &cm->cm_frame->dcmd;
1755 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1756 dcmd->header.cmd = MFI_CMD_DCMD;
1757 dcmd->header.timeout = 0;
1758 dcmd->header.data_len = size;
1759 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1760 ((uint32_t *)&dcmd->mbox)[0] = seq;
1761 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1762 cm->cm_sg = &dcmd->sgl;
1763 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1764 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1765 cm->cm_data = el;
1766 cm->cm_len = size;
1767
1768 mtx_lock(&sc->mfi_io_lock);
1769 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1770 device_printf(sc->mfi_dev,
1771 "Failed to get controller entries\n");
1772 mfi_release_command(cm);
1773 mtx_unlock(&sc->mfi_io_lock);
1774 break;
1775 }
1776
1777 mtx_unlock(&sc->mfi_io_lock);
1778 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1779 BUS_DMASYNC_POSTREAD);
1780 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1781
1782 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1783 mtx_lock(&sc->mfi_io_lock);
1784 mfi_release_command(cm);
1785 mtx_unlock(&sc->mfi_io_lock);
1786 break;
1787 }
1788 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1789 device_printf(sc->mfi_dev,
1790 "Error %d fetching controller entries\n",
1791 dcmd->header.cmd_status);
1792 mtx_lock(&sc->mfi_io_lock);
1793 mfi_release_command(cm);
1794 mtx_unlock(&sc->mfi_io_lock);
1795 break;
1796 }
1797 mtx_lock(&sc->mfi_io_lock);
1798 mfi_release_command(cm);
1799 mtx_unlock(&sc->mfi_io_lock);
1800
1801 for (i = 0; i < el->count; i++) {
1802 /*
1803 * If this event is newer than 'stop_seq' then
1804 * break out of the loop. Note that the log
1805 * is a circular buffer so we have to handle
1806 * the case that our stop point is earlier in
1807 * the buffer than our start point.
1808 */
1809 if (el->event[i].seq >= stop_seq) {
1810 if (start_seq <= stop_seq)
1811 break;
1812 else if (el->event[i].seq < start_seq)
1813 break;
1814 }
1815 mtx_lock(&sc->mfi_io_lock);
1816 mfi_queue_evt(sc, &el->event[i]);
1817 mtx_unlock(&sc->mfi_io_lock);
1818 }
1819 seq = el->event[el->count - 1].seq + 1;
1820 }
1821
1822 free(el, M_MFIBUF);
1823 return (0);
1824 }
1825
1826 static int
1827 mfi_add_ld(struct mfi_softc *sc, int id)
1828 {
1829 struct mfi_command *cm;
1830 struct mfi_dcmd_frame *dcmd = NULL;
1831 struct mfi_ld_info *ld_info = NULL;
1832 struct mfi_disk_pending *ld_pend;
1833 int error;
1834
1835 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1836
1837 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1838 if (ld_pend != NULL) {
1839 ld_pend->ld_id = id;
1840 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1841 }
1842
1843 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1844 (void **)&ld_info, sizeof(*ld_info));
1845 if (error) {
1846 device_printf(sc->mfi_dev,
1847 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1848 if (ld_info)
1849 free(ld_info, M_MFIBUF);
1850 return (error);
1851 }
1852 cm->cm_flags = MFI_CMD_DATAIN;
1853 dcmd = &cm->cm_frame->dcmd;
1854 dcmd->mbox[0] = id;
1855 if (mfi_wait_command(sc, cm) != 0) {
1856 device_printf(sc->mfi_dev,
1857 "Failed to get logical drive: %d\n", id);
1858 free(ld_info, M_MFIBUF);
1859 return (0);
1860 }
1861 if (ld_info->ld_config.params.isSSCD != 1)
1862 mfi_add_ld_complete(cm);
1863 else {
1864 mfi_release_command(cm);
1865 if (ld_info) /* SSCD drives ld_info free here */
1866 free(ld_info, M_MFIBUF);
1867 }
1868 return (0);
1869 }
1870
1871 static void
1872 mfi_add_ld_complete(struct mfi_command *cm)
1873 {
1874 struct mfi_frame_header *hdr;
1875 struct mfi_ld_info *ld_info;
1876 struct mfi_softc *sc;
1877 device_t child;
1878
1879 sc = cm->cm_sc;
1880 hdr = &cm->cm_frame->header;
1881 ld_info = cm->cm_private;
1882
1883 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1884 free(ld_info, M_MFIBUF);
1885 wakeup(&sc->mfi_map_sync_cm);
1886 mfi_release_command(cm);
1887 return;
1888 }
1889 wakeup(&sc->mfi_map_sync_cm);
1890 mfi_release_command(cm);
1891
1892 mtx_unlock(&sc->mfi_io_lock);
1893 mtx_lock(&Giant);
1894 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1895 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1896 free(ld_info, M_MFIBUF);
1897 mtx_unlock(&Giant);
1898 mtx_lock(&sc->mfi_io_lock);
1899 return;
1900 }
1901
1902 device_set_ivars(child, ld_info);
1903 device_set_desc(child, "MFI Logical Disk");
1904 bus_generic_attach(sc->mfi_dev);
1905 mtx_unlock(&Giant);
1906 mtx_lock(&sc->mfi_io_lock);
1907 }
1908
1909 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1910 {
1911 struct mfi_command *cm;
1912 struct mfi_dcmd_frame *dcmd = NULL;
1913 struct mfi_pd_info *pd_info = NULL;
1914 struct mfi_system_pending *syspd_pend;
1915 int error;
1916
1917 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1918
1919 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1920 if (syspd_pend != NULL) {
1921 syspd_pend->pd_id = id;
1922 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1923 }
1924
1925 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1926 (void **)&pd_info, sizeof(*pd_info));
1927 if (error) {
1928 device_printf(sc->mfi_dev,
1929 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1930 error);
1931 if (pd_info)
1932 free(pd_info, M_MFIBUF);
1933 return (error);
1934 }
1935 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1936 dcmd = &cm->cm_frame->dcmd;
1937 dcmd->mbox[0]=id;
1938 dcmd->header.scsi_status = 0;
1939 dcmd->header.pad0 = 0;
1940 if (mfi_mapcmd(sc, cm) != 0) {
1941 device_printf(sc->mfi_dev,
1942 "Failed to get physical drive info %d\n", id);
1943 free(pd_info, M_MFIBUF);
1944 return (0);
1945 }
1946 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1947 BUS_DMASYNC_POSTREAD);
1948 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1949 mfi_add_sys_pd_complete(cm);
1950 return (0);
1951 }
1952
1953 static void
1954 mfi_add_sys_pd_complete(struct mfi_command *cm)
1955 {
1956 struct mfi_frame_header *hdr;
1957 struct mfi_pd_info *pd_info;
1958 struct mfi_softc *sc;
1959 device_t child;
1960
1961 sc = cm->cm_sc;
1962 hdr = &cm->cm_frame->header;
1963 pd_info = cm->cm_private;
1964
1965 if (hdr->cmd_status != MFI_STAT_OK) {
1966 free(pd_info, M_MFIBUF);
1967 mfi_release_command(cm);
1968 return;
1969 }
1970 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1971 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1972 pd_info->ref.v.device_id);
1973 free(pd_info, M_MFIBUF);
1974 mfi_release_command(cm);
1975 return;
1976 }
1977 mfi_release_command(cm);
1978
1979 mtx_unlock(&sc->mfi_io_lock);
1980 mtx_lock(&Giant);
1981 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1982 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1983 free(pd_info, M_MFIBUF);
1984 mtx_unlock(&Giant);
1985 mtx_lock(&sc->mfi_io_lock);
1986 return;
1987 }
1988
1989 device_set_ivars(child, pd_info);
1990 device_set_desc(child, "MFI System PD");
1991 bus_generic_attach(sc->mfi_dev);
1992 mtx_unlock(&Giant);
1993 mtx_lock(&sc->mfi_io_lock);
1994 }
1995
1996 static struct mfi_command *
1997 mfi_bio_command(struct mfi_softc *sc)
1998 {
1999 struct bio *bio;
2000 struct mfi_command *cm = NULL;
2001
2002 /*reserving two commands to avoid starvation for IOCTL*/
2003 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2004 return (NULL);
2005 }
2006 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2007 return (NULL);
2008 }
2009 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2010 cm = mfi_build_ldio(sc, bio);
2011 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2012 cm = mfi_build_syspdio(sc, bio);
2013 }
2014 if (!cm)
2015 mfi_enqueue_bio(sc, bio);
2016 return cm;
2017 }
2018
2019 /*
2020 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2021 */
2022
2023 int
2024 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2025 {
2026 int cdb_len;
2027
2028 if (((lba & 0x1fffff) == lba)
2029 && ((block_count & 0xff) == block_count)
2030 && (byte2 == 0)) {
2031 /* We can fit in a 6 byte cdb */
2032 struct scsi_rw_6 *scsi_cmd;
2033
2034 scsi_cmd = (struct scsi_rw_6 *)cdb;
2035 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2036 scsi_ulto3b(lba, scsi_cmd->addr);
2037 scsi_cmd->length = block_count & 0xff;
2038 scsi_cmd->control = 0;
2039 cdb_len = sizeof(*scsi_cmd);
2040 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2041 /* Need a 10 byte CDB */
2042 struct scsi_rw_10 *scsi_cmd;
2043
2044 scsi_cmd = (struct scsi_rw_10 *)cdb;
2045 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2046 scsi_cmd->byte2 = byte2;
2047 scsi_ulto4b(lba, scsi_cmd->addr);
2048 scsi_cmd->reserved = 0;
2049 scsi_ulto2b(block_count, scsi_cmd->length);
2050 scsi_cmd->control = 0;
2051 cdb_len = sizeof(*scsi_cmd);
2052 } else if (((block_count & 0xffffffff) == block_count) &&
2053 ((lba & 0xffffffff) == lba)) {
2054 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2055 struct scsi_rw_12 *scsi_cmd;
2056
2057 scsi_cmd = (struct scsi_rw_12 *)cdb;
2058 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2059 scsi_cmd->byte2 = byte2;
2060 scsi_ulto4b(lba, scsi_cmd->addr);
2061 scsi_cmd->reserved = 0;
2062 scsi_ulto4b(block_count, scsi_cmd->length);
2063 scsi_cmd->control = 0;
2064 cdb_len = sizeof(*scsi_cmd);
2065 } else {
2066 /*
2067 * 16 byte CDB. We'll only get here if the LBA is larger
2068 * than 2^32
2069 */
2070 struct scsi_rw_16 *scsi_cmd;
2071
2072 scsi_cmd = (struct scsi_rw_16 *)cdb;
2073 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2074 scsi_cmd->byte2 = byte2;
2075 scsi_u64to8b(lba, scsi_cmd->addr);
2076 scsi_cmd->reserved = 0;
2077 scsi_ulto4b(block_count, scsi_cmd->length);
2078 scsi_cmd->control = 0;
2079 cdb_len = sizeof(*scsi_cmd);
2080 }
2081
2082 return cdb_len;
2083 }
2084
2085 static struct mfi_command *
2086 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2087 {
2088 struct mfi_command *cm;
2089 struct mfi_pass_frame *pass;
2090 uint32_t context = 0;
2091 int flags = 0, blkcount = 0, readop;
2092 uint8_t cdb_len;
2093
2094 if ((cm = mfi_dequeue_free(sc)) == NULL)
2095 return (NULL);
2096
2097 /* Zero out the MFI frame */
2098 context = cm->cm_frame->header.context;
2099 bzero(cm->cm_frame, sizeof(union mfi_frame));
2100 cm->cm_frame->header.context = context;
2101 pass = &cm->cm_frame->pass;
2102 bzero(pass->cdb, 16);
2103 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2104 switch (bio->bio_cmd & 0x03) {
2105 case BIO_READ:
2106 flags = MFI_CMD_DATAIN;
2107 readop = 1;
2108 break;
2109 case BIO_WRITE:
2110 flags = MFI_CMD_DATAOUT;
2111 readop = 0;
2112 break;
2113 default:
2114 /* TODO: what about BIO_DELETE??? */
2115 panic("Unsupported bio command %x\n", bio->bio_cmd);
2116 }
2117
2118 /* Cheat with the sector length to avoid a non-constant division */
2119 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2120 /* Fill the LBA and Transfer length in CDB */
2121 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2122 pass->cdb);
2123 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2124 pass->header.lun_id = 0;
2125 pass->header.timeout = 0;
2126 pass->header.flags = 0;
2127 pass->header.scsi_status = 0;
2128 pass->header.sense_len = MFI_SENSE_LEN;
2129 pass->header.data_len = bio->bio_bcount;
2130 pass->header.cdb_len = cdb_len;
2131 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2132 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2133 cm->cm_complete = mfi_bio_complete;
2134 cm->cm_private = bio;
2135 cm->cm_data = bio->bio_data;
2136 cm->cm_len = bio->bio_bcount;
2137 cm->cm_sg = &pass->sgl;
2138 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2139 cm->cm_flags = flags;
2140 return (cm);
2141 }
2142
2143 static struct mfi_command *
2144 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2145 {
2146 struct mfi_io_frame *io;
2147 struct mfi_command *cm;
2148 int flags;
2149 uint32_t blkcount;
2150 uint32_t context = 0;
2151
2152 if ((cm = mfi_dequeue_free(sc)) == NULL)
2153 return (NULL);
2154
2155 /* Zero out the MFI frame */
2156 context = cm->cm_frame->header.context;
2157 bzero(cm->cm_frame, sizeof(union mfi_frame));
2158 cm->cm_frame->header.context = context;
2159 io = &cm->cm_frame->io;
2160 switch (bio->bio_cmd & 0x03) {
2161 case BIO_READ:
2162 io->header.cmd = MFI_CMD_LD_READ;
2163 flags = MFI_CMD_DATAIN;
2164 break;
2165 case BIO_WRITE:
2166 io->header.cmd = MFI_CMD_LD_WRITE;
2167 flags = MFI_CMD_DATAOUT;
2168 break;
2169 default:
2170 /* TODO: what about BIO_DELETE??? */
2171 panic("Unsupported bio command %x\n", bio->bio_cmd);
2172 }
2173
2174 /* Cheat with the sector length to avoid a non-constant division */
2175 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2176 io->header.target_id = (uintptr_t)bio->bio_driver1;
2177 io->header.timeout = 0;
2178 io->header.flags = 0;
2179 io->header.scsi_status = 0;
2180 io->header.sense_len = MFI_SENSE_LEN;
2181 io->header.data_len = blkcount;
2182 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2183 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2184 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2185 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2186 cm->cm_complete = mfi_bio_complete;
2187 cm->cm_private = bio;
2188 cm->cm_data = bio->bio_data;
2189 cm->cm_len = bio->bio_bcount;
2190 cm->cm_sg = &io->sgl;
2191 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2192 cm->cm_flags = flags;
2193 return (cm);
2194 }
2195
2196 static void
2197 mfi_bio_complete(struct mfi_command *cm)
2198 {
2199 struct bio *bio;
2200 struct mfi_frame_header *hdr;
2201 struct mfi_softc *sc;
2202
2203 bio = cm->cm_private;
2204 hdr = &cm->cm_frame->header;
2205 sc = cm->cm_sc;
2206
2207 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2208 bio->bio_flags |= BIO_ERROR;
2209 bio->bio_error = EIO;
2210 device_printf(sc->mfi_dev, "I/O error, status= %d "
2211 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2212 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2213 } else if (cm->cm_error != 0) {
2214 bio->bio_flags |= BIO_ERROR;
2215 }
2216
2217 mfi_release_command(cm);
2218 mfi_disk_complete(bio);
2219 }
2220
2221 void
2222 mfi_startio(struct mfi_softc *sc)
2223 {
2224 struct mfi_command *cm;
2225 struct ccb_hdr *ccbh;
2226
2227 for (;;) {
2228 /* Don't bother if we're short on resources */
2229 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2230 break;
2231
2232 /* Try a command that has already been prepared */
2233 cm = mfi_dequeue_ready(sc);
2234
2235 if (cm == NULL) {
2236 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2237 cm = sc->mfi_cam_start(ccbh);
2238 }
2239
2240 /* Nope, so look for work on the bioq */
2241 if (cm == NULL)
2242 cm = mfi_bio_command(sc);
2243
2244 /* No work available, so exit */
2245 if (cm == NULL)
2246 break;
2247
2248 /* Send the command to the controller */
2249 if (mfi_mapcmd(sc, cm) != 0) {
2250 mfi_requeue_ready(cm);
2251 break;
2252 }
2253 }
2254 }
2255
2256 int
2257 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2258 {
2259 int error, polled;
2260
2261 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2262
2263 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
2264 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2265 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2266 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2267 if (error == EINPROGRESS) {
2268 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2269 return (0);
2270 }
2271 } else {
2272 if (sc->MFA_enabled)
2273 error = mfi_tbolt_send_frame(sc, cm);
2274 else
2275 error = mfi_send_frame(sc, cm);
2276 }
2277
2278 return (error);
2279 }
2280
2281 static void
2282 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2283 {
2284 struct mfi_frame_header *hdr;
2285 struct mfi_command *cm;
2286 union mfi_sgl *sgl;
2287 struct mfi_softc *sc;
2288 int i, j, first, dir;
2289 int sge_size;
2290
2291 cm = (struct mfi_command *)arg;
2292 sc = cm->cm_sc;
2293 hdr = &cm->cm_frame->header;
2294 sgl = cm->cm_sg;
2295
2296 if (error) {
2297 printf("error %d in callback\n", error);
2298 cm->cm_error = error;
2299 mfi_complete(sc, cm);
2300 return;
2301 }
2302 /* Use IEEE sgl only for IO's on a SKINNY controller
2303 * For other commands on a SKINNY controller use either
2304 * sg32 or sg64 based on the sizeof(bus_addr_t).
2305 * Also calculate the total frame size based on the type
2306 * of SGL used.
2307 */
2308 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2309 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2310 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2311 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2312 for (i = 0; i < nsegs; i++) {
2313 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2314 sgl->sg_skinny[i].len = segs[i].ds_len;
2315 sgl->sg_skinny[i].flag = 0;
2316 }
2317 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2318 sge_size = sizeof(struct mfi_sg_skinny);
2319 hdr->sg_count = nsegs;
2320 } else {
2321 j = 0;
2322 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2323 first = cm->cm_stp_len;
2324 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2325 sgl->sg32[j].addr = segs[0].ds_addr;
2326 sgl->sg32[j++].len = first;
2327 } else {
2328 sgl->sg64[j].addr = segs[0].ds_addr;
2329 sgl->sg64[j++].len = first;
2330 }
2331 } else
2332 first = 0;
2333 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2334 for (i = 0; i < nsegs; i++) {
2335 sgl->sg32[j].addr = segs[i].ds_addr + first;
2336 sgl->sg32[j++].len = segs[i].ds_len - first;
2337 first = 0;
2338 }
2339 } else {
2340 for (i = 0; i < nsegs; i++) {
2341 sgl->sg64[j].addr = segs[i].ds_addr + first;
2342 sgl->sg64[j++].len = segs[i].ds_len - first;
2343 first = 0;
2344 }
2345 hdr->flags |= MFI_FRAME_SGL64;
2346 }
2347 hdr->sg_count = j;
2348 sge_size = sc->mfi_sge_size;
2349 }
2350
2351 dir = 0;
2352 if (cm->cm_flags & MFI_CMD_DATAIN) {
2353 dir |= BUS_DMASYNC_PREREAD;
2354 hdr->flags |= MFI_FRAME_DIR_READ;
2355 }
2356 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2357 dir |= BUS_DMASYNC_PREWRITE;
2358 hdr->flags |= MFI_FRAME_DIR_WRITE;
2359 }
2360 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2361 cm->cm_flags |= MFI_CMD_MAPPED;
2362
2363 /*
2364 * Instead of calculating the total number of frames in the
2365 * compound frame, it's already assumed that there will be at
2366 * least 1 frame, so don't compensate for the modulo of the
2367 * following division.
2368 */
2369 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2370 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2371
2372 if (sc->MFA_enabled)
2373 mfi_tbolt_send_frame(sc, cm);
2374 else
2375 mfi_send_frame(sc, cm);
2376
2377 return;
2378 }
2379
2380 static int
2381 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2382 {
2383 struct mfi_frame_header *hdr;
2384 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2385
2386 hdr = &cm->cm_frame->header;
2387
2388 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2389 cm->cm_timestamp = time_uptime;
2390 mfi_enqueue_busy(cm);
2391 } else {
2392 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2393 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2394 }
2395
2396 /*
2397 * The bus address of the command is aligned on a 64 byte boundary,
2398 * leaving the least 6 bits as zero. For whatever reason, the
2399 * hardware wants the address shifted right by three, leaving just
2400 * 3 zero bits. These three bits are then used as a prefetching
2401 * hint for the hardware to predict how many frames need to be
2402 * fetched across the bus. If a command has more than 8 frames
2403 * then the 3 bits are set to 0x7 and the firmware uses other
2404 * information in the command to determine the total amount to fetch.
2405 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2406 * is enough for both 32bit and 64bit systems.
2407 */
2408 if (cm->cm_extra_frames > 7)
2409 cm->cm_extra_frames = 7;
2410
2411 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2412
2413 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2414 return (0);
2415
2416 /* This is a polled command, so busy-wait for it to complete. */
2417 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2418 DELAY(1000);
2419 tm -= 1;
2420 if (tm <= 0)
2421 break;
2422 }
2423
2424 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2425 device_printf(sc->mfi_dev, "Frame %p timed out "
2426 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2427 return (ETIMEDOUT);
2428 }
2429
2430 return (0);
2431 }
2432
2433
2434 void
2435 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2436 {
2437 int dir;
2438
2439 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2440 dir = 0;
2441 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2442 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2443 dir |= BUS_DMASYNC_POSTREAD;
2444 if (cm->cm_flags & MFI_CMD_DATAOUT)
2445 dir |= BUS_DMASYNC_POSTWRITE;
2446
2447 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2448 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2449 cm->cm_flags &= ~MFI_CMD_MAPPED;
2450 }
2451
2452 cm->cm_flags |= MFI_CMD_COMPLETED;
2453
2454 if (cm->cm_complete != NULL)
2455 cm->cm_complete(cm);
2456 else
2457 wakeup(cm);
2458 }
2459
2460 static int
2461 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2462 {
2463 struct mfi_command *cm;
2464 struct mfi_abort_frame *abort;
2465 int i = 0;
2466 uint32_t context = 0;
2467
2468 mtx_lock(&sc->mfi_io_lock);
2469 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2470 return (EBUSY);
2471 }
2472
2473 /* Zero out the MFI frame */
2474 context = cm->cm_frame->header.context;
2475 bzero(cm->cm_frame, sizeof(union mfi_frame));
2476 cm->cm_frame->header.context = context;
2477
2478 abort = &cm->cm_frame->abort;
2479 abort->header.cmd = MFI_CMD_ABORT;
2480 abort->header.flags = 0;
2481 abort->header.scsi_status = 0;
2482 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2483 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2484 abort->abort_mfi_addr_hi =
2485 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2486 cm->cm_data = NULL;
2487 cm->cm_flags = MFI_CMD_POLLED;
2488
2489 mfi_mapcmd(sc, cm);
2490 mfi_release_command(cm);
2491
2492 mtx_unlock(&sc->mfi_io_lock);
2493 while (i < 5 && *cm_abort != NULL) {
2494 tsleep(cm_abort, 0, "mfiabort",
2495 5 * hz);
2496 i++;
2497 }
2498 if (*cm_abort != NULL) {
2499 /* Force a complete if command didn't abort */
2500 mtx_lock(&sc->mfi_io_lock);
2501 (*cm_abort)->cm_complete(*cm_abort);
2502 mtx_unlock(&sc->mfi_io_lock);
2503 }
2504
2505 return (0);
2506 }
2507
2508 int
2509 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2510 int len)
2511 {
2512 struct mfi_command *cm;
2513 struct mfi_io_frame *io;
2514 int error;
2515 uint32_t context = 0;
2516
2517 if ((cm = mfi_dequeue_free(sc)) == NULL)
2518 return (EBUSY);
2519
2520 /* Zero out the MFI frame */
2521 context = cm->cm_frame->header.context;
2522 bzero(cm->cm_frame, sizeof(union mfi_frame));
2523 cm->cm_frame->header.context = context;
2524
2525 io = &cm->cm_frame->io;
2526 io->header.cmd = MFI_CMD_LD_WRITE;
2527 io->header.target_id = id;
2528 io->header.timeout = 0;
2529 io->header.flags = 0;
2530 io->header.scsi_status = 0;
2531 io->header.sense_len = MFI_SENSE_LEN;
2532 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2533 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2534 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2535 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2536 io->lba_lo = lba & 0xffffffff;
2537 cm->cm_data = virt;
2538 cm->cm_len = len;
2539 cm->cm_sg = &io->sgl;
2540 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2541 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2542
2543 error = mfi_mapcmd(sc, cm);
2544 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2545 BUS_DMASYNC_POSTWRITE);
2546 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2547 mfi_release_command(cm);
2548
2549 return (error);
2550 }
2551
2552 int
2553 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2554 int len)
2555 {
2556 struct mfi_command *cm;
2557 struct mfi_pass_frame *pass;
2558 int error, readop, cdb_len;
2559 uint32_t blkcount;
2560
2561 if ((cm = mfi_dequeue_free(sc)) == NULL)
2562 return (EBUSY);
2563
2564 pass = &cm->cm_frame->pass;
2565 bzero(pass->cdb, 16);
2566 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2567
2568 readop = 0;
2569 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2570 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2571 pass->header.target_id = id;
2572 pass->header.timeout = 0;
2573 pass->header.flags = 0;
2574 pass->header.scsi_status = 0;
2575 pass->header.sense_len = MFI_SENSE_LEN;
2576 pass->header.data_len = len;
2577 pass->header.cdb_len = cdb_len;
2578 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2579 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2580 cm->cm_data = virt;
2581 cm->cm_len = len;
2582 cm->cm_sg = &pass->sgl;
2583 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2584 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2585
2586 error = mfi_mapcmd(sc, cm);
2587 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2588 BUS_DMASYNC_POSTWRITE);
2589 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2590 mfi_release_command(cm);
2591
2592 return (error);
2593 }
2594
2595 static int
2596 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2597 {
2598 struct mfi_softc *sc;
2599 int error;
2600
2601 sc = dev->si_drv1;
2602
2603 mtx_lock(&sc->mfi_io_lock);
2604 if (sc->mfi_detaching)
2605 error = ENXIO;
2606 else {
2607 sc->mfi_flags |= MFI_FLAGS_OPEN;
2608 error = 0;
2609 }
2610 mtx_unlock(&sc->mfi_io_lock);
2611
2612 return (error);
2613 }
2614
2615 static int
2616 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2617 {
2618 struct mfi_softc *sc;
2619 struct mfi_aen *mfi_aen_entry, *tmp;
2620
2621 sc = dev->si_drv1;
2622
2623 mtx_lock(&sc->mfi_io_lock);
2624 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2625
2626 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2627 if (mfi_aen_entry->p == curproc) {
2628 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2629 aen_link);
2630 free(mfi_aen_entry, M_MFIBUF);
2631 }
2632 }
2633 mtx_unlock(&sc->mfi_io_lock);
2634 return (0);
2635 }
2636
2637 static int
2638 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2639 {
2640
2641 switch (opcode) {
2642 case MFI_DCMD_LD_DELETE:
2643 case MFI_DCMD_CFG_ADD:
2644 case MFI_DCMD_CFG_CLEAR:
2645 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2646 sx_xlock(&sc->mfi_config_lock);
2647 return (1);
2648 default:
2649 return (0);
2650 }
2651 }
2652
2653 static void
2654 mfi_config_unlock(struct mfi_softc *sc, int locked)
2655 {
2656
2657 if (locked)
2658 sx_xunlock(&sc->mfi_config_lock);
2659 }
2660
2661 /*
2662 * Perform pre-issue checks on commands from userland and possibly veto
2663 * them.
2664 */
2665 static int
2666 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2667 {
2668 struct mfi_disk *ld, *ld2;
2669 int error;
2670 struct mfi_system_pd *syspd = NULL;
2671 uint16_t syspd_id;
2672 uint16_t *mbox;
2673
2674 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2675 error = 0;
2676 switch (cm->cm_frame->dcmd.opcode) {
2677 case MFI_DCMD_LD_DELETE:
2678 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2679 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2680 break;
2681 }
2682 if (ld == NULL)
2683 error = ENOENT;
2684 else
2685 error = mfi_disk_disable(ld);
2686 break;
2687 case MFI_DCMD_CFG_CLEAR:
2688 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2689 error = mfi_disk_disable(ld);
2690 if (error)
2691 break;
2692 }
2693 if (error) {
2694 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2695 if (ld2 == ld)
2696 break;
2697 mfi_disk_enable(ld2);
2698 }
2699 }
2700 break;
2701 case MFI_DCMD_PD_STATE_SET:
2702 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2703 syspd_id = mbox[0];
2704 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2705 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2706 if (syspd->pd_id == syspd_id)
2707 break;
2708 }
2709 }
2710 else
2711 break;
2712 if (syspd)
2713 error = mfi_syspd_disable(syspd);
2714 break;
2715 default:
2716 break;
2717 }
2718 return (error);
2719 }
2720
2721 /* Perform post-issue checks on commands from userland. */
2722 static void
2723 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2724 {
2725 struct mfi_disk *ld, *ldn;
2726 struct mfi_system_pd *syspd = NULL;
2727 uint16_t syspd_id;
2728 uint16_t *mbox;
2729
2730 switch (cm->cm_frame->dcmd.opcode) {
2731 case MFI_DCMD_LD_DELETE:
2732 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2733 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2734 break;
2735 }
2736 KASSERT(ld != NULL, ("volume dissappeared"));
2737 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2738 mtx_unlock(&sc->mfi_io_lock);
2739 mtx_lock(&Giant);
2740 device_delete_child(sc->mfi_dev, ld->ld_dev);
2741 mtx_unlock(&Giant);
2742 mtx_lock(&sc->mfi_io_lock);
2743 } else
2744 mfi_disk_enable(ld);
2745 break;
2746 case MFI_DCMD_CFG_CLEAR:
2747 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2748 mtx_unlock(&sc->mfi_io_lock);
2749 mtx_lock(&Giant);
2750 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2751 device_delete_child(sc->mfi_dev, ld->ld_dev);
2752 }
2753 mtx_unlock(&Giant);
2754 mtx_lock(&sc->mfi_io_lock);
2755 } else {
2756 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2757 mfi_disk_enable(ld);
2758 }
2759 break;
2760 case MFI_DCMD_CFG_ADD:
2761 mfi_ldprobe(sc);
2762 break;
2763 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2764 mfi_ldprobe(sc);
2765 break;
2766 case MFI_DCMD_PD_STATE_SET:
2767 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2768 syspd_id = mbox[0];
2769 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2770 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2771 if (syspd->pd_id == syspd_id)
2772 break;
2773 }
2774 }
2775 else
2776 break;
2777 /* If the transition fails then enable the syspd again */
2778 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2779 mfi_syspd_enable(syspd);
2780 break;
2781 }
2782 }
2783
2784 static int
2785 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2786 {
2787 struct mfi_config_data *conf_data;
2788 struct mfi_command *ld_cm = NULL;
2789 struct mfi_ld_info *ld_info = NULL;
2790 struct mfi_ld_config *ld;
2791 char *p;
2792 int error = 0;
2793
2794 conf_data = (struct mfi_config_data *)cm->cm_data;
2795
2796 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2797 p = (char *)conf_data->array;
2798 p += conf_data->array_size * conf_data->array_count;
2799 ld = (struct mfi_ld_config *)p;
2800 if (ld->params.isSSCD == 1)
2801 error = 1;
2802 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2803 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2804 (void **)&ld_info, sizeof(*ld_info));
2805 if (error) {
2806 device_printf(sc->mfi_dev, "Failed to allocate"
2807 "MFI_DCMD_LD_GET_INFO %d", error);
2808 if (ld_info)
2809 free(ld_info, M_MFIBUF);
2810 return 0;
2811 }
2812 ld_cm->cm_flags = MFI_CMD_DATAIN;
2813 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2814 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2815 if (mfi_wait_command(sc, ld_cm) != 0) {
2816 device_printf(sc->mfi_dev, "failed to get log drv\n");
2817 mfi_release_command(ld_cm);
2818 free(ld_info, M_MFIBUF);
2819 return 0;
2820 }
2821
2822 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2823 free(ld_info, M_MFIBUF);
2824 mfi_release_command(ld_cm);
2825 return 0;
2826 }
2827 else
2828 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2829
2830 if (ld_info->ld_config.params.isSSCD == 1)
2831 error = 1;
2832
2833 mfi_release_command(ld_cm);
2834 free(ld_info, M_MFIBUF);
2835
2836 }
2837 return error;
2838 }
2839
2840 static int
2841 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2842 {
2843 uint8_t i;
2844 struct mfi_ioc_packet *ioc;
2845 ioc = (struct mfi_ioc_packet *)arg;
2846 int sge_size, error;
2847 struct megasas_sge *kern_sge;
2848
2849 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2850 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2851 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2852
2853 if (sizeof(bus_addr_t) == 8) {
2854 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2855 cm->cm_extra_frames = 2;
2856 sge_size = sizeof(struct mfi_sg64);
2857 } else {
2858 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2859 sge_size = sizeof(struct mfi_sg32);
2860 }
2861
2862 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2863 for (i = 0; i < ioc->mfi_sge_count; i++) {
2864 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2865 1, 0, /* algnmnt, boundary */
2866 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2867 BUS_SPACE_MAXADDR, /* highaddr */
2868 NULL, NULL, /* filter, filterarg */
2869 ioc->mfi_sgl[i].iov_len,/* maxsize */
2870 2, /* nsegments */
2871 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2872 BUS_DMA_ALLOCNOW, /* flags */
2873 NULL, NULL, /* lockfunc, lockarg */
2874 &sc->mfi_kbuff_arr_dmat[i])) {
2875 device_printf(sc->mfi_dev,
2876 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2877 return (ENOMEM);
2878 }
2879
2880 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2881 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2882 &sc->mfi_kbuff_arr_dmamap[i])) {
2883 device_printf(sc->mfi_dev,
2884 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2885 return (ENOMEM);
2886 }
2887
2888 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2889 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2890 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2891 &sc->mfi_kbuff_arr_busaddr[i], 0);
2892
2893 if (!sc->kbuff_arr[i]) {
2894 device_printf(sc->mfi_dev,
2895 "Could not allocate memory for kbuff_arr info\n");
2896 return -1;
2897 }
2898 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2899 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2900
2901 if (sizeof(bus_addr_t) == 8) {
2902 cm->cm_frame->stp.sgl.sg64[i].addr =
2903 kern_sge[i].phys_addr;
2904 cm->cm_frame->stp.sgl.sg64[i].len =
2905 ioc->mfi_sgl[i].iov_len;
2906 } else {
2907 cm->cm_frame->stp.sgl.sg32[i].addr =
2908 kern_sge[i].phys_addr;
2909 cm->cm_frame->stp.sgl.sg32[i].len =
2910 ioc->mfi_sgl[i].iov_len;
2911 }
2912
2913 error = copyin(ioc->mfi_sgl[i].iov_base,
2914 sc->kbuff_arr[i],
2915 ioc->mfi_sgl[i].iov_len);
2916 if (error != 0) {
2917 device_printf(sc->mfi_dev, "Copy in failed\n");
2918 return error;
2919 }
2920 }
2921
2922 cm->cm_flags |=MFI_CMD_MAPPED;
2923 return 0;
2924 }
2925
2926 static int
2927 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2928 {
2929 struct mfi_command *cm;
2930 struct mfi_dcmd_frame *dcmd;
2931 void *ioc_buf = NULL;
2932 uint32_t context;
2933 int error = 0, locked;
2934
2935
2936 if (ioc->buf_size > 0) {
2937 if (ioc->buf_size > 1024 * 1024)
2938 return (ENOMEM);
2939 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2940 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2941 if (error) {
2942 device_printf(sc->mfi_dev, "failed to copyin\n");
2943 free(ioc_buf, M_MFIBUF);
2944 return (error);
2945 }
2946 }
2947
2948 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2949
2950 mtx_lock(&sc->mfi_io_lock);
2951 while ((cm = mfi_dequeue_free(sc)) == NULL)
2952 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2953
2954 /* Save context for later */
2955 context = cm->cm_frame->header.context;
2956
2957 dcmd = &cm->cm_frame->dcmd;
2958 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2959
2960 cm->cm_sg = &dcmd->sgl;
2961 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2962 cm->cm_data = ioc_buf;
2963 cm->cm_len = ioc->buf_size;
2964
2965 /* restore context */
2966 cm->cm_frame->header.context = context;
2967
2968 /* Cheat since we don't know if we're writing or reading */
2969 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2970
2971 error = mfi_check_command_pre(sc, cm);
2972 if (error)
2973 goto out;
2974
2975 error = mfi_wait_command(sc, cm);
2976 if (error) {
2977 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2978 goto out;
2979 }
2980 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2981 mfi_check_command_post(sc, cm);
2982 out:
2983 mfi_release_command(cm);
2984 mtx_unlock(&sc->mfi_io_lock);
2985 mfi_config_unlock(sc, locked);
2986 if (ioc->buf_size > 0)
2987 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2988 if (ioc_buf)
2989 free(ioc_buf, M_MFIBUF);
2990 return (error);
2991 }
2992
2993 #define PTRIN(p) ((void *)(uintptr_t)(p))
2994
2995 static int
2996 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2997 {
2998 struct mfi_softc *sc;
2999 union mfi_statrequest *ms;
3000 struct mfi_ioc_packet *ioc;
3001 #ifdef COMPAT_FREEBSD32
3002 struct mfi_ioc_packet32 *ioc32;
3003 #endif
3004 struct mfi_ioc_aen *aen;
3005 struct mfi_command *cm = NULL;
3006 uint32_t context = 0;
3007 union mfi_sense_ptr sense_ptr;
3008 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3009 size_t len;
3010 int i, res;
3011 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3012 #ifdef COMPAT_FREEBSD32
3013 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3014 struct mfi_ioc_passthru iop_swab;
3015 #endif
3016 int error, locked;
3017 union mfi_sgl *sgl;
3018 sc = dev->si_drv1;
3019 error = 0;
3020
3021 if (sc->adpreset)
3022 return EBUSY;
3023
3024 if (sc->hw_crit_error)
3025 return EBUSY;
3026
3027 if (sc->issuepend_done == 0)
3028 return EBUSY;
3029
3030 switch (cmd) {
3031 case MFIIO_STATS:
3032 ms = (union mfi_statrequest *)arg;
3033 switch (ms->ms_item) {
3034 case MFIQ_FREE:
3035 case MFIQ_BIO:
3036 case MFIQ_READY:
3037 case MFIQ_BUSY:
3038 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3039 sizeof(struct mfi_qstat));
3040 break;
3041 default:
3042 error = ENOIOCTL;
3043 break;
3044 }
3045 break;
3046 case MFIIO_QUERY_DISK:
3047 {
3048 struct mfi_query_disk *qd;
3049 struct mfi_disk *ld;
3050
3051 qd = (struct mfi_query_disk *)arg;
3052 mtx_lock(&sc->mfi_io_lock);
3053 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3054 if (ld->ld_id == qd->array_id)
3055 break;
3056 }
3057 if (ld == NULL) {
3058 qd->present = 0;
3059 mtx_unlock(&sc->mfi_io_lock);
3060 return (0);
3061 }
3062 qd->present = 1;
3063 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3064 qd->open = 1;
3065 bzero(qd->devname, SPECNAMELEN + 1);
3066 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3067 mtx_unlock(&sc->mfi_io_lock);
3068 break;
3069 }
3070 case MFI_CMD:
3071 #ifdef COMPAT_FREEBSD32
3072 case MFI_CMD32:
3073 #endif
3074 {
3075 devclass_t devclass;
3076 ioc = (struct mfi_ioc_packet *)arg;
3077 int adapter;
3078
3079 adapter = ioc->mfi_adapter_no;
3080 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3081 devclass = devclass_find("mfi");
3082 sc = devclass_get_softc(devclass, adapter);
3083 }
3084 mtx_lock(&sc->mfi_io_lock);
3085 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3086 mtx_unlock(&sc->mfi_io_lock);
3087 return (EBUSY);
3088 }
3089 mtx_unlock(&sc->mfi_io_lock);
3090 locked = 0;
3091
3092 /*
3093 * save off original context since copying from user
3094 * will clobber some data
3095 */
3096 context = cm->cm_frame->header.context;
3097 cm->cm_frame->header.context = cm->cm_index;
3098
3099 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3100 2 * MEGAMFI_FRAME_SIZE);
3101 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3102 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3103 cm->cm_frame->header.scsi_status = 0;
3104 cm->cm_frame->header.pad0 = 0;
3105 if (ioc->mfi_sge_count) {
3106 cm->cm_sg =
3107 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3108 }
3109 sgl = cm->cm_sg;
3110 cm->cm_flags = 0;
3111 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3112 cm->cm_flags |= MFI_CMD_DATAIN;
3113 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3114 cm->cm_flags |= MFI_CMD_DATAOUT;
3115 /* Legacy app shim */
3116 if (cm->cm_flags == 0)
3117 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3118 cm->cm_len = cm->cm_frame->header.data_len;
3119 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3120 #ifdef COMPAT_FREEBSD32
3121 if (cmd == MFI_CMD) {
3122 #endif
3123 /* Native */
3124 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3125 #ifdef COMPAT_FREEBSD32
3126 } else {
3127 /* 32bit on 64bit */
3128 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3129 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3130 }
3131 #endif
3132 cm->cm_len += cm->cm_stp_len;
3133 }
3134 if (cm->cm_len &&
3135 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3136 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3137 M_WAITOK | M_ZERO);
3138 if (cm->cm_data == NULL) {
3139 device_printf(sc->mfi_dev, "Malloc failed\n");
3140 goto out;
3141 }
3142 } else {
3143 cm->cm_data = 0;
3144 }
3145
3146 /* restore header context */
3147 cm->cm_frame->header.context = context;
3148
3149 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3150 res = mfi_stp_cmd(sc, cm, arg);
3151 if (res != 0)
3152 goto out;
3153 } else {
3154 temp = data;
3155 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3156 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3157 for (i = 0; i < ioc->mfi_sge_count; i++) {
3158 #ifdef COMPAT_FREEBSD32
3159 if (cmd == MFI_CMD) {
3160 #endif
3161 /* Native */
3162 addr = ioc->mfi_sgl[i].iov_base;
3163 len = ioc->mfi_sgl[i].iov_len;
3164 #ifdef COMPAT_FREEBSD32
3165 } else {
3166 /* 32bit on 64bit */
3167 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3168 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3169 len = ioc32->mfi_sgl[i].iov_len;
3170 }
3171 #endif
3172 error = copyin(addr, temp, len);
3173 if (error != 0) {
3174 device_printf(sc->mfi_dev,
3175 "Copy in failed\n");
3176 goto out;
3177 }
3178 temp = &temp[len];
3179 }
3180 }
3181 }
3182
3183 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3184 locked = mfi_config_lock(sc,
3185 cm->cm_frame->dcmd.opcode);
3186
3187 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3188 cm->cm_frame->pass.sense_addr_lo =
3189 (uint32_t)cm->cm_sense_busaddr;
3190 cm->cm_frame->pass.sense_addr_hi =
3191 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3192 }
3193 mtx_lock(&sc->mfi_io_lock);
3194 skip_pre_post = mfi_check_for_sscd (sc, cm);
3195 if (!skip_pre_post) {
3196 error = mfi_check_command_pre(sc, cm);
3197 if (error) {
3198 mtx_unlock(&sc->mfi_io_lock);
3199 goto out;
3200 }
3201 }
3202 if ((error = mfi_wait_command(sc, cm)) != 0) {
3203 device_printf(sc->mfi_dev,
3204 "Controller polled failed\n");
3205 mtx_unlock(&sc->mfi_io_lock);
3206 goto out;
3207 }
3208 if (!skip_pre_post) {
3209 mfi_check_command_post(sc, cm);
3210 }
3211 mtx_unlock(&sc->mfi_io_lock);
3212
3213 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3214 temp = data;
3215 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3216 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3217 for (i = 0; i < ioc->mfi_sge_count; i++) {
3218 #ifdef COMPAT_FREEBSD32
3219 if (cmd == MFI_CMD) {
3220 #endif
3221 /* Native */
3222 addr = ioc->mfi_sgl[i].iov_base;
3223 len = ioc->mfi_sgl[i].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3225 } else {
3226 /* 32bit on 64bit */
3227 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3229 len = ioc32->mfi_sgl[i].iov_len;
3230 }
3231 #endif
3232 error = copyout(temp, addr, len);
3233 if (error != 0) {
3234 device_printf(sc->mfi_dev,
3235 "Copy out failed\n");
3236 goto out;
3237 }
3238 temp = &temp[len];
3239 }
3240 }
3241 }
3242
3243 if (ioc->mfi_sense_len) {
3244 /* get user-space sense ptr then copy out sense */
3245 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3246 &sense_ptr.sense_ptr_data[0],
3247 sizeof(sense_ptr.sense_ptr_data));
3248 #ifdef COMPAT_FREEBSD32
3249 if (cmd != MFI_CMD) {
3250 /*
3251 * not 64bit native so zero out any address
3252 * over 32bit */
3253 sense_ptr.addr.high = 0;
3254 }
3255 #endif
3256 error = copyout(cm->cm_sense, sense_ptr.user_space,
3257 ioc->mfi_sense_len);
3258 if (error != 0) {
3259 device_printf(sc->mfi_dev,
3260 "Copy out failed\n");
3261 goto out;
3262 }
3263 }
3264
3265 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3266 out:
3267 mfi_config_unlock(sc, locked);
3268 if (data)
3269 free(data, M_MFIBUF);
3270 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3271 for (i = 0; i < 2; i++) {
3272 if (sc->kbuff_arr[i]) {
3273 if (sc->mfi_kbuff_arr_busaddr != 0)
3274 bus_dmamap_unload(
3275 sc->mfi_kbuff_arr_dmat[i],
3276 sc->mfi_kbuff_arr_dmamap[i]
3277 );
3278 if (sc->kbuff_arr[i] != NULL)
3279 bus_dmamem_free(
3280 sc->mfi_kbuff_arr_dmat[i],
3281 sc->kbuff_arr[i],
3282 sc->mfi_kbuff_arr_dmamap[i]
3283 );
3284 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3285 bus_dma_tag_destroy(
3286 sc->mfi_kbuff_arr_dmat[i]);
3287 }
3288 }
3289 }
3290 if (cm) {
3291 mtx_lock(&sc->mfi_io_lock);
3292 mfi_release_command(cm);
3293 mtx_unlock(&sc->mfi_io_lock);
3294 }
3295
3296 break;
3297 }
3298 case MFI_SET_AEN:
3299 aen = (struct mfi_ioc_aen *)arg;
3300 error = mfi_aen_register(sc, aen->aen_seq_num,
3301 aen->aen_class_locale);
3302
3303 break;
3304 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3305 {
3306 devclass_t devclass;
3307 struct mfi_linux_ioc_packet l_ioc;
3308 int adapter;
3309
3310 devclass = devclass_find("mfi");
3311 if (devclass == NULL)
3312 return (ENOENT);
3313
3314 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3315 if (error)
3316 return (error);
3317 adapter = l_ioc.lioc_adapter_no;
3318 sc = devclass_get_softc(devclass, adapter);
3319 if (sc == NULL)
3320 return (ENOENT);
3321 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3322 cmd, arg, flag, td));
3323 break;
3324 }
3325 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3326 {
3327 devclass_t devclass;
3328 struct mfi_linux_ioc_aen l_aen;
3329 int adapter;
3330
3331 devclass = devclass_find("mfi");
3332 if (devclass == NULL)
3333 return (ENOENT);
3334
3335 error = copyin(arg, &l_aen, sizeof(l_aen));
3336 if (error)
3337 return (error);
3338 adapter = l_aen.laen_adapter_no;
3339 sc = devclass_get_softc(devclass, adapter);
3340 if (sc == NULL)
3341 return (ENOENT);
3342 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3343 cmd, arg, flag, td));
3344 break;
3345 }
3346 #ifdef COMPAT_FREEBSD32
3347 case MFIIO_PASSTHRU32:
3348 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3349 error = ENOTTY;
3350 break;
3351 }
3352 iop_swab.ioc_frame = iop32->ioc_frame;
3353 iop_swab.buf_size = iop32->buf_size;
3354 iop_swab.buf = PTRIN(iop32->buf);
3355 iop = &iop_swab;
3356 /* FALLTHROUGH */
3357 #endif
3358 case MFIIO_PASSTHRU:
3359 error = mfi_user_command(sc, iop);
3360 #ifdef COMPAT_FREEBSD32
3361 if (cmd == MFIIO_PASSTHRU32)
3362 iop32->ioc_frame = iop_swab.ioc_frame;
3363 #endif
3364 break;
3365 default:
3366 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3367 error = ENOTTY;
3368 break;
3369 }
3370
3371 return (error);
3372 }
3373
3374 static int
3375 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3376 {
3377 struct mfi_softc *sc;
3378 struct mfi_linux_ioc_packet l_ioc;
3379 struct mfi_linux_ioc_aen l_aen;
3380 struct mfi_command *cm = NULL;
3381 struct mfi_aen *mfi_aen_entry;
3382 union mfi_sense_ptr sense_ptr;
3383 uint32_t context = 0;
3384 uint8_t *data = NULL, *temp;
3385 int i;
3386 int error, locked;
3387
3388 sc = dev->si_drv1;
3389 error = 0;
3390 switch (cmd) {
3391 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3392 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3393 if (error != 0)
3394 return (error);
3395
3396 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3397 return (EINVAL);
3398 }
3399
3400 mtx_lock(&sc->mfi_io_lock);
3401 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3402 mtx_unlock(&sc->mfi_io_lock);
3403 return (EBUSY);
3404 }
3405 mtx_unlock(&sc->mfi_io_lock);
3406 locked = 0;
3407
3408 /*
3409 * save off original context since copying from user
3410 * will clobber some data
3411 */
3412 context = cm->cm_frame->header.context;
3413
3414 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3415 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3416 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3417 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3418 cm->cm_frame->header.scsi_status = 0;
3419 cm->cm_frame->header.pad0 = 0;
3420 if (l_ioc.lioc_sge_count)
3421 cm->cm_sg =
3422 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3423 cm->cm_flags = 0;
3424 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3425 cm->cm_flags |= MFI_CMD_DATAIN;
3426 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3427 cm->cm_flags |= MFI_CMD_DATAOUT;
3428 cm->cm_len = cm->cm_frame->header.data_len;
3429 if (cm->cm_len &&
3430 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3431 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3432 M_WAITOK | M_ZERO);
3433 if (cm->cm_data == NULL) {
3434 device_printf(sc->mfi_dev, "Malloc failed\n");
3435 goto out;
3436 }
3437 } else {
3438 cm->cm_data = 0;
3439 }
3440
3441 /* restore header context */
3442 cm->cm_frame->header.context = context;
3443
3444 temp = data;
3445 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3446 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3447 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3448 temp,
3449 l_ioc.lioc_sgl[i].iov_len);
3450 if (error != 0) {
3451 device_printf(sc->mfi_dev,
3452 "Copy in failed\n");
3453 goto out;
3454 }
3455 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3456 }
3457 }
3458
3459 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3460 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3461
3462 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3463 cm->cm_frame->pass.sense_addr_lo =
3464 (uint32_t)cm->cm_sense_busaddr;
3465 cm->cm_frame->pass.sense_addr_hi =
3466 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3467 }
3468
3469 mtx_lock(&sc->mfi_io_lock);
3470 error = mfi_check_command_pre(sc, cm);
3471 if (error) {
3472 mtx_unlock(&sc->mfi_io_lock);
3473 goto out;
3474 }
3475
3476 if ((error = mfi_wait_command(sc, cm)) != 0) {
3477 device_printf(sc->mfi_dev,
3478 "Controller polled failed\n");
3479 mtx_unlock(&sc->mfi_io_lock);
3480 goto out;
3481 }
3482
3483 mfi_check_command_post(sc, cm);
3484 mtx_unlock(&sc->mfi_io_lock);
3485
3486 temp = data;
3487 if (cm->cm_flags & MFI_CMD_DATAIN) {
3488 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3489 error = copyout(temp,
3490 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3491 l_ioc.lioc_sgl[i].iov_len);
3492 if (error != 0) {
3493 device_printf(sc->mfi_dev,
3494 "Copy out failed\n");
3495 goto out;
3496 }
3497 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3498 }
3499 }
3500
3501 if (l_ioc.lioc_sense_len) {
3502 /* get user-space sense ptr then copy out sense */
3503 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3504 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3505 &sense_ptr.sense_ptr_data[0],
3506 sizeof(sense_ptr.sense_ptr_data));
3507 #ifdef __amd64__
3508 /*
3509 * only 32bit Linux support so zero out any
3510 * address over 32bit
3511 */
3512 sense_ptr.addr.high = 0;
3513 #endif
3514 error = copyout(cm->cm_sense, sense_ptr.user_space,
3515 l_ioc.lioc_sense_len);
3516 if (error != 0) {
3517 device_printf(sc->mfi_dev,
3518 "Copy out failed\n");
3519 goto out;
3520 }
3521 }
3522
3523 error = copyout(&cm->cm_frame->header.cmd_status,
3524 &((struct mfi_linux_ioc_packet*)arg)
3525 ->lioc_frame.hdr.cmd_status,
3526 1);
3527 if (error != 0) {
3528 device_printf(sc->mfi_dev,
3529 "Copy out failed\n");
3530 goto out;
3531 }
3532
3533 out:
3534 mfi_config_unlock(sc, locked);
3535 if (data)
3536 free(data, M_MFIBUF);
3537 if (cm) {
3538 mtx_lock(&sc->mfi_io_lock);
3539 mfi_release_command(cm);
3540 mtx_unlock(&sc->mfi_io_lock);
3541 }
3542
3543 return (error);
3544 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3545 error = copyin(arg, &l_aen, sizeof(l_aen));
3546 if (error != 0)
3547 return (error);
3548 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3549 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3550 M_WAITOK);
3551 mtx_lock(&sc->mfi_io_lock);
3552 if (mfi_aen_entry != NULL) {
3553 mfi_aen_entry->p = curproc;
3554 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3555 aen_link);
3556 }
3557 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3558 l_aen.laen_class_locale);
3559
3560 if (error != 0) {
3561 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3562 aen_link);
3563 free(mfi_aen_entry, M_MFIBUF);
3564 }
3565 mtx_unlock(&sc->mfi_io_lock);
3566
3567 return (error);
3568 default:
3569 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3570 error = ENOENT;
3571 break;
3572 }
3573
3574 return (error);
3575 }
3576
3577 static int
3578 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3579 {
3580 struct mfi_softc *sc;
3581 int revents = 0;
3582
3583 sc = dev->si_drv1;
3584
3585 if (poll_events & (POLLIN | POLLRDNORM)) {
3586 if (sc->mfi_aen_triggered != 0) {
3587 revents |= poll_events & (POLLIN | POLLRDNORM);
3588 sc->mfi_aen_triggered = 0;
3589 }
3590 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3591 revents |= POLLERR;
3592 }
3593 }
3594
3595 if (revents == 0) {
3596 if (poll_events & (POLLIN | POLLRDNORM)) {
3597 sc->mfi_poll_waiting = 1;
3598 selrecord(td, &sc->mfi_select);
3599 }
3600 }
3601
3602 return revents;
3603 }
3604
3605 static void
3606 mfi_dump_all(void)
3607 {
3608 struct mfi_softc *sc;
3609 struct mfi_command *cm;
3610 devclass_t dc;
3611 time_t deadline;
3612 int timedout;
3613 int i;
3614
3615 dc = devclass_find("mfi");
3616 if (dc == NULL) {
3617 printf("No mfi dev class\n");
3618 return;
3619 }
3620
3621 for (i = 0; ; i++) {
3622 sc = devclass_get_softc(dc, i);
3623 if (sc == NULL)
3624 break;
3625 device_printf(sc->mfi_dev, "Dumping\n\n");
3626 timedout = 0;
3627 deadline = time_uptime - MFI_CMD_TIMEOUT;
3628 mtx_lock(&sc->mfi_io_lock);
3629 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3630 if (cm->cm_timestamp < deadline) {
3631 device_printf(sc->mfi_dev,
3632 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3633 cm, (int)(time_uptime - cm->cm_timestamp));
3634 MFI_PRINT_CMD(cm);
3635 timedout++;
3636 }
3637 }
3638
3639 #if 0
3640 if (timedout)
3641 MFI_DUMP_CMDS(SC);
3642 #endif
3643
3644 mtx_unlock(&sc->mfi_io_lock);
3645 }
3646
3647 return;
3648 }
3649
3650 static void
3651 mfi_timeout(void *data)
3652 {
3653 struct mfi_softc *sc = (struct mfi_softc *)data;
3654 struct mfi_command *cm;
3655 time_t deadline;
3656 int timedout = 0;
3657
3658 deadline = time_uptime - MFI_CMD_TIMEOUT;
3659 if (sc->adpreset == 0) {
3660 if (!mfi_tbolt_reset(sc)) {
3661 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3662 return;
3663 }
3664 }
3665 mtx_lock(&sc->mfi_io_lock);
3666 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3667 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3668 continue;
3669 if (cm->cm_timestamp < deadline) {
3670 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3671 cm->cm_timestamp = time_uptime;
3672 } else {
3673 device_printf(sc->mfi_dev,
3674 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3675 cm, (int)(time_uptime - cm->cm_timestamp)
3676 );
3677 MFI_PRINT_CMD(cm);
3678 MFI_VALIDATE_CMD(sc, cm);
3679 timedout++;
3680 }
3681 }
3682 }
3683
3684 #if 0
3685 if (timedout)
3686 MFI_DUMP_CMDS(SC);
3687 #endif
3688
3689 mtx_unlock(&sc->mfi_io_lock);
3690
3691 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3692 mfi_timeout, sc);
3693
3694 if (0)
3695 mfi_dump_all();
3696 return;
3697 }
Cache object: 7ce55e9781ef392d1ab3a9757ce2d5e8
|