FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi.c
1 /*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include "opt_mfi.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/poll.h>
37 #include <sys/select.h>
38 #include <sys/bus.h>
39 #include <sys/conf.h>
40 #include <sys/eventhandler.h>
41 #include <sys/ioccom.h>
42 #include <sys/uio.h>
43 #include <sys/proc.h>
44 #include <sys/signalvar.h>
45
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <sys/rman.h>
51
52 #include <dev/mfi/mfi_compat.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55
56 #include <dev/mfi/mfireg.h>
57 #include <dev/mfi/mfi_ioctl.h>
58 #include <dev/mfi/mfivar.h>
59
60 static int mfi_alloc_commands(struct mfi_softc *);
61 static void mfi_release_command(struct mfi_command *cm);
62 static int mfi_comms_init(struct mfi_softc *);
63 static int mfi_polled_command(struct mfi_softc *, struct mfi_command *);
64 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
65 static int mfi_get_controller_info(struct mfi_softc *);
66 static int mfi_get_log_state(struct mfi_softc *,
67 struct mfi_evt_log_state **);
68 #ifdef NOTYET
69 static int mfi_get_entry(struct mfi_softc *, int);
70 #endif
71 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
72 uint32_t, void **, size_t);
73 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
74 static void mfi_startup(void *arg);
75 static void mfi_intr(void *arg);
76 static void mfi_enable_intr(struct mfi_softc *sc);
77 static void mfi_ldprobe(struct mfi_softc *sc);
78 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
79 static void mfi_aen_complete(struct mfi_command *);
80 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
81 static int mfi_add_ld(struct mfi_softc *sc, int);
82 static void mfi_add_ld_complete(struct mfi_command *);
83 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
84 static void mfi_bio_complete(struct mfi_command *);
85 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
86 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
87 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
88 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
89 #ifdef notyet
90 static int mfi_linux_ioctl_int(dev_t, u_long, caddr_t, int, d_thread_t *);
91 #endif
92
93 /* Management interface */
94 static d_open_t mfi_open;
95 static d_close_t mfi_close;
96 static d_ioctl_t mfi_ioctl;
97 static d_poll_t mfi_poll;
98
99 #define MFI_CDEV_MAJOR 177
100
/*
 * Character device switch for the management interface (/dev/mfi%d),
 * in the old positional cdevsw layout (presumably FreeBSD 4-style:
 * open, close, read, write, ioctl, poll, mmap, strategy, name, major,
 * dump, psize, flags -- TODO confirm against this tree's sys/conf.h).
 */
static struct cdevsw mfi_cdevsw = {
	mfi_open,		/* open */
	mfi_close,		/* close */
	noread,			/* read: not supported */
	nowrite,		/* write: not supported */
	mfi_ioctl,		/* ioctl */
	mfi_poll,		/* poll: AEN delivery to userland */
	nommap,			/* mmap: not supported */
	nostrategy,		/* strategy: not a disk device */
	"mfi",			/* name */
	MFI_CDEV_MAJOR,		/* major */
	nodump,			/* dump */
	nopsize,		/* psize */
	0			/* flags */
};
116
117 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
118
119 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
120
121 static int
122 mfi_transition_firmware(struct mfi_softc *sc)
123 {
124 int32_t fw_state, cur_state;
125 int max_wait, i;
126
127 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
128 while (fw_state != MFI_FWSTATE_READY) {
129 if (bootverbose)
130 device_printf(sc->mfi_dev, "Waiting for firmware to "
131 "become ready\n");
132 cur_state = fw_state;
133 switch (fw_state) {
134 case MFI_FWSTATE_FAULT:
135 device_printf(sc->mfi_dev, "Firmware fault\n");
136 return (ENXIO);
137 case MFI_FWSTATE_WAIT_HANDSHAKE:
138 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
139 max_wait = 2;
140 break;
141 case MFI_FWSTATE_OPERATIONAL:
142 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
143 max_wait = 10;
144 break;
145 case MFI_FWSTATE_UNDEFINED:
146 case MFI_FWSTATE_BB_INIT:
147 max_wait = 2;
148 break;
149 case MFI_FWSTATE_FW_INIT:
150 case MFI_FWSTATE_DEVICE_SCAN:
151 case MFI_FWSTATE_FLUSH_CACHE:
152 max_wait = 20;
153 break;
154 default:
155 device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
156 fw_state);
157 return (ENXIO);
158 }
159 for (i = 0; i < (max_wait * 10); i++) {
160 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
161 if (fw_state == cur_state)
162 DELAY(100000);
163 else
164 break;
165 }
166 if (fw_state == cur_state) {
167 device_printf(sc->mfi_dev, "firmware stuck in state "
168 "%#x\n", fw_state);
169 return (ENXIO);
170 }
171 }
172 return (0);
173 }
174
175 static void
176 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
177 {
178 uint32_t *addr;
179
180 addr = arg;
181 *addr = segs[0].ds_addr;
182 }
183
184 int
185 mfi_attach(struct mfi_softc *sc)
186 {
187 uint32_t status;
188 int error, commsz, framessz, sensesz;
189 int frames, unit;
190
191 TAILQ_INIT(&sc->mfi_ld_tqh);
192 TAILQ_INIT(&sc->mfi_aen_pids);
193
194 mfi_initq_free(sc);
195 mfi_initq_ready(sc);
196 mfi_initq_busy(sc);
197 mfi_initq_bio(sc);
198
199 /* Before we get too far, see if the firmware is working */
200 if ((error = mfi_transition_firmware(sc)) != 0) {
201 device_printf(sc->mfi_dev, "Firmware not in READY state, "
202 "error %d\n", error);
203 return (ENXIO);
204 }
205
206 /*
207 * Get information needed for sizing the contiguous memory for the
208 * frame pool. Size down the sgl parameter since we know that
209 * we will never need more than what's required for MAXPHYS.
210 * It would be nice if these constants were available at runtime
211 * instead of compile time.
212 */
213 status = MFI_READ4(sc, MFI_OMSG0);
214 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
215 sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
216 sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));
217
218 /*
219 * Create the dma tag for data buffers. Used both for block I/O
220 * and for various internal data queries.
221 */
222 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
223 1, 0, /* algnmnt, boundary */
224 BUS_SPACE_MAXADDR, /* lowaddr */
225 BUS_SPACE_MAXADDR, /* highaddr */
226 NULL, NULL, /* filter, filterarg */
227 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
228 sc->mfi_total_sgl, /* nsegments */
229 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
230 BUS_DMA_ALLOCNOW, /* flags */
231 &sc->mfi_buffer_dmat)) {
232 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
233 return (ENOMEM);
234 }
235
236 /*
237 * Allocate DMA memory for the comms queues. Keep it under 4GB for
238 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
239 * entry, so the calculated size here will be will be 1 more than
240 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
241 */
242 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
243 sizeof(struct mfi_hwcomms);
244 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
245 1, 0, /* algnmnt, boundary */
246 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
247 BUS_SPACE_MAXADDR, /* highaddr */
248 NULL, NULL, /* filter, filterarg */
249 commsz, /* maxsize */
250 1, /* msegments */
251 commsz, /* maxsegsize */
252 0, /* flags */
253 &sc->mfi_comms_dmat)) {
254 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
255 return (ENOMEM);
256 }
257 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
258 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
259 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
260 return (ENOMEM);
261 }
262 bzero(sc->mfi_comms, commsz);
263 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
264 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
265
266 /*
267 * Allocate DMA memory for the command frames. Keep them in the
268 * lower 4GB for efficiency. Calculate the size of the frames at
269 * the same time; the frame is 64 bytes plus space for the SG lists.
270 * The assumption here is that the SG list will start at the second
271 * 64 byte segment of the frame and not use the unused bytes in the
272 * frame. While this might seem wasteful, apparently the frames must
273 * be 64 byte aligned, so any savings would be negated by the extra
274 * alignment padding.
275 */
276 if (sizeof(bus_addr_t) == 8) {
277 sc->mfi_sgsize = sizeof(struct mfi_sg64);
278 sc->mfi_flags |= MFI_FLAGS_SG64;
279 } else {
280 sc->mfi_sgsize = sizeof(struct mfi_sg32);
281 }
282 frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
283 MFI_FRAME_SIZE + 1;
284 sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
285 framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
286 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
287 64, 0, /* algnmnt, boundary */
288 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
289 BUS_SPACE_MAXADDR, /* highaddr */
290 NULL, NULL, /* filter, filterarg */
291 framessz, /* maxsize */
292 1, /* nsegments */
293 framessz, /* maxsegsize */
294 0, /* flags */
295 &sc->mfi_frames_dmat)) {
296 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
297 return (ENOMEM);
298 }
299 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
300 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
301 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
302 return (ENOMEM);
303 }
304 bzero(sc->mfi_frames, framessz);
305 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
306 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
307
308 /*
309 * Allocate DMA memory for the frame sense data. Keep them in the
310 * lower 4GB for efficiency
311 */
312 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
313 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
314 4, 0, /* algnmnt, boundary */
315 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
316 BUS_SPACE_MAXADDR, /* highaddr */
317 NULL, NULL, /* filter, filterarg */
318 sensesz, /* maxsize */
319 1, /* nsegments */
320 sensesz, /* maxsegsize */
321 0, /* flags */
322 &sc->mfi_sense_dmat)) {
323 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
324 return (ENOMEM);
325 }
326 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
327 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
328 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
329 return (ENOMEM);
330 }
331 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
332 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
333
334 if ((error = mfi_alloc_commands(sc)) != 0)
335 return (error);
336
337 if ((error = mfi_comms_init(sc)) != 0)
338 return (error);
339
340 if ((error = mfi_get_controller_info(sc)) != 0)
341 return (error);
342
343 if ((error = mfi_aen_setup(sc, 0), 0) != 0)
344 return (error);
345
346 /*
347 * Set up the interrupt handler. XXX This should happen in
348 * mfi_pci.c
349 */
350 sc->mfi_irq_rid = 0;
351 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
352 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
353 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
354 return (EINVAL);
355 }
356 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
357 mfi_intr, sc, &sc->mfi_intr)) {
358 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
359 return (EINVAL);
360 }
361
362 /* Register a config hook to probe the bus for arrays */
363 sc->mfi_ich.ich_func = mfi_startup;
364 sc->mfi_ich.ich_arg = sc;
365 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
366 device_printf(sc->mfi_dev, "Cannot establish configuration "
367 "hook\n");
368 return (EINVAL);
369 }
370
371 /*
372 * Register a shutdown handler.
373 */
374 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
375 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
376 device_printf(sc->mfi_dev, "Warning: shutdown event "
377 "registration failed\n");
378 }
379
380 /*
381 * Create the control device for doing management
382 */
383 unit = device_get_unit(sc->mfi_dev);
384 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
385 0640, "mfi%d", unit);
386 if (sc->mfi_cdev != NULL)
387 sc->mfi_cdev->si_drv1 = sc;
388
389 return (0);
390 }
391
392 static int
393 mfi_alloc_commands(struct mfi_softc *sc)
394 {
395 struct mfi_command *cm;
396 int i, ncmds;
397
398 /*
399 * XXX Should we allocate all the commands up front, or allocate on
400 * demand later like 'aac' does?
401 */
402 ncmds = sc->mfi_max_fw_cmds;
403 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
404 M_WAITOK | M_ZERO);
405
406 for (i = 0; i < ncmds; i++) {
407 cm = &sc->mfi_commands[i];
408 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
409 sc->mfi_frame_size * i);
410 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
411 sc->mfi_frame_size * i;
412 cm->cm_frame->header.context = i;
413 cm->cm_sense = &sc->mfi_sense[i];
414 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
415 cm->cm_sc = sc;
416 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
417 &cm->cm_dmamap) == 0)
418 mfi_release_command(cm);
419 else
420 break;
421 sc->mfi_total_cmds++;
422 }
423
424 return (0);
425 }
426
427 static void
428 mfi_release_command(struct mfi_command *cm)
429 {
430 uint32_t *hdr_data;
431
432 /*
433 * Zero out the important fields of the frame, but make sure the
434 * context field is preserved
435 */
436 hdr_data = (uint32_t *)cm->cm_frame;
437 hdr_data[0] = 0;
438 hdr_data[1] = 0;
439
440 cm->cm_extra_frames = 0;
441 cm->cm_flags = 0;
442 cm->cm_complete = NULL;
443 cm->cm_private = NULL;
444 cm->cm_sg = 0;
445 cm->cm_total_frame_size = 0;
446 mfi_enqueue_free(cm);
447 }
448
449 static int
450 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
451 void **bufp, size_t bufsize)
452 {
453 struct mfi_command *cm;
454 struct mfi_dcmd_frame *dcmd;
455 void *buf = NULL;
456
457 cm = mfi_dequeue_free(sc);
458 if (cm == NULL)
459 return (EBUSY);
460
461 if ((bufsize > 0) && (bufp != NULL)) {
462 if (*bufp == NULL) {
463 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
464 if (buf == NULL) {
465 mfi_release_command(cm);
466 return (ENOMEM);
467 }
468 *bufp = buf;
469 } else {
470 buf = *bufp;
471 }
472 }
473
474 dcmd = &cm->cm_frame->dcmd;
475 bzero(dcmd->mbox, MFI_MBOX_SIZE);
476 dcmd->header.cmd = MFI_CMD_DCMD;
477 dcmd->header.timeout = 0;
478 dcmd->header.flags = 0;
479 dcmd->header.data_len = bufsize;
480 dcmd->opcode = opcode;
481 cm->cm_sg = &dcmd->sgl;
482 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
483 cm->cm_flags = 0;
484 cm->cm_data = buf;
485 cm->cm_private = buf;
486 cm->cm_len = bufsize;
487
488 *cmp = cm;
489 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
490 *bufp = buf;
491 return (0);
492 }
493
494 static int
495 mfi_comms_init(struct mfi_softc *sc)
496 {
497 struct mfi_command *cm;
498 struct mfi_init_frame *init;
499 struct mfi_init_qinfo *qinfo;
500 int error;
501
502 if ((cm = mfi_dequeue_free(sc)) == NULL)
503 return (EBUSY);
504
505 /*
506 * Abuse the SG list area of the frame to hold the init_qinfo
507 * object;
508 */
509 init = &cm->cm_frame->init;
510 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
511
512 bzero(qinfo, sizeof(struct mfi_init_qinfo));
513 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
514 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
515 offsetof(struct mfi_hwcomms, hw_reply_q);
516 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
517 offsetof(struct mfi_hwcomms, hw_pi);
518 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
519 offsetof(struct mfi_hwcomms, hw_ci);
520
521 init->header.cmd = MFI_CMD_INIT;
522 init->header.data_len = sizeof(struct mfi_init_qinfo);
523 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
524
525 if ((error = mfi_polled_command(sc, cm)) != 0) {
526 device_printf(sc->mfi_dev, "failed to send init command\n");
527 return (error);
528 }
529 mfi_release_command(cm);
530
531 return (0);
532 }
533
534 static int
535 mfi_get_controller_info(struct mfi_softc *sc)
536 {
537 struct mfi_command *cm = NULL;
538 struct mfi_ctrl_info *ci = NULL;
539 uint32_t max_sectors_1, max_sectors_2;
540 int error;
541
542 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
543 (void **)&ci, sizeof(*ci));
544 if (error)
545 goto out;
546 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
547
548 if ((error = mfi_mapcmd(sc, cm)) != 0) {
549 device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
550 free(ci, M_MFIBUF);
551 mfi_release_command(cm);
552 return (error);
553 }
554
555 /* It's ok if this fails, just use default info instead */
556 if ((error = mfi_polled_command(sc, cm)) != 0) {
557 device_printf(sc->mfi_dev, "Failed to get controller info\n");
558 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
559 MFI_SECTOR_LEN;
560 error = 0;
561 goto out;
562 }
563
564 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
565 BUS_DMASYNC_POSTREAD);
566 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
567
568 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
569 max_sectors_2 = ci->max_request_size;
570 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
571
572 out:
573 if (ci)
574 free(ci, M_MFIBUF);
575 if (cm)
576 mfi_release_command(cm);
577 return (error);
578 }
579
/*
 * Fetch the controller's event-log state (sequence numbers) via the
 * EVENT_GETINFO DCMD.  The data buffer is allocated here (through
 * mfi_dcmd_command) and returned via *log_state; the caller owns it
 * and must free it with M_MFIBUF, including on error paths where the
 * allocation already happened.
 */
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	/* Polled completion; data flows controller -> host. */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Log state buffer map failed\n");
		goto out;
	}

	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		/*
		 * NOTE(review): on this path the buffer DMA map appears to
		 * remain loaded (no bus_dmamap_unload before 'out') --
		 * confirm whether an unload is required after a polled
		 * command failure.
		 */
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}
612
613 static int
614 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
615 {
616 struct mfi_evt_log_state *log_state = NULL;
617 union mfi_evt class_locale;
618 int error = 0;
619 uint32_t seq;
620
621 class_locale.members.reserved = 0;
622 class_locale.members.locale = MFI_EVT_LOCALE_ALL;
623 class_locale.members.class = MFI_EVT_CLASS_DEBUG;
624
625 if (seq_start == 0) {
626 error = mfi_get_log_state(sc, &log_state);
627 if (error) {
628 if (log_state)
629 free(log_state, M_MFIBUF);
630 return (error);
631 }
632 /*
633 * Don't run them yet since we can't parse them.
634 * We can indirectly get the contents from
635 * the AEN mechanism via setting it lower then
636 * current. The firmware will iterate through them.
637 */
638 #ifdef NOTYET
639 for (seq = log_state->shutdown_seq_num;
640 seq <= log_state->newest_seq_num; seq++) {
641 mfi_get_entry(sc, seq);
642 }
643 #endif
644
645 seq = log_state->shutdown_seq_num + 1;
646 } else
647 seq = seq_start;
648 mfi_aen_register(sc, seq, class_locale.word);
649 free(log_state, M_MFIBUF);
650
651 return 0;
652 }
653
654 static int
655 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
656 {
657 struct mfi_frame_header *hdr;
658 int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
659
660 hdr = &cm->cm_frame->header;
661 hdr->cmd_status = 0xff;
662 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
663
664 mfi_send_frame(sc, cm);
665
666 while (hdr->cmd_status == 0xff) {
667 DELAY(1000);
668 tm -= 1000;
669 if (tm <= 0)
670 break;
671 }
672
673 if (hdr->cmd_status == 0xff) {
674 device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
675 return (ETIMEDOUT);
676 }
677
678 return (0);
679 }
680
681 static int
682 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
683 {
684 int error, s;
685
686 cm->cm_complete = NULL;
687
688 s = splbio();
689 mfi_enqueue_ready(cm);
690 mfi_startio(sc);
691 error = tsleep(cm, PRIBIO, "mfiwait", 0);
692 splx(s);
693 return (error);
694 }
695
/*
 * Release every resource acquired by mfi_attach(), roughly in reverse
 * order of allocation.  Each step is individually guarded, so this is
 * safe to call on a partially-attached softc after a mid-attach
 * failure.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	/* Destroy the per-command DMA maps, then the command array. */
	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	/* Tear down the interrupt before freeing memory it touches. */
	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Sense buffers: unload the map, free the memory, kill the tag. */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	/* Command frame pool, same pattern. */
	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	/* Comms queue area, same pattern. */
	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	return;
}
750
751 static void
752 mfi_startup(void *arg)
753 {
754 struct mfi_softc *sc;
755
756 sc = (struct mfi_softc *)arg;
757
758 config_intrhook_disestablish(&sc->mfi_ich);
759
760 mfi_enable_intr(sc);
761 mfi_ldprobe(sc);
762 }
763
/*
 * Interrupt handler: acknowledge the controller interrupt, then drain
 * the reply queue between the consumer index (hw_ci) and the firmware's
 * producer index (hw_pi), completing the command named by each queued
 * context value.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t status, pi, ci, context;
	int s;

	sc = (struct mfi_softc *)arg;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return;	/* not ours; the IRQ was allocated RF_SHAREABLE */
	/* Writing the status back acknowledges the interrupt. */
	MFI_WRITE4(sc, MFI_OSTS, status);

	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	s = splbio();
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		/* Poison the consumed slot so stale reads are detectable. */
		sc->mfi_comms->hw_reply_q[ci] = 0xffffffff;
		if (context == 0xffffffff) {
			device_printf(sc->mfi_dev, "mfi_intr: invalid context "
			    "pi= %d ci= %d\n", pi, ci);
		} else {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			mfi_complete(sc, cm);
		}
		/* The reply queue has mfi_max_fw_cmds + 1 slots; wrap. */
		ci++;
		if (ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}
	splx(s);

	/*
	 * NOTE(review): the consumer index is published to shared memory
	 * only after splx(); confirm this ordering cannot race with a
	 * re-entered interrupt reading a stale hw_ci.
	 */
	sc->mfi_comms->hw_ci = ci;

	return;
}
804
805 int
806 mfi_shutdown(struct mfi_softc *sc)
807 {
808 struct mfi_dcmd_frame *dcmd;
809 struct mfi_command *cm;
810 int error;
811
812 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
813 if (error)
814 return (error);
815
816 if (sc->mfi_aen_cm != NULL)
817 mfi_abort(sc, sc->mfi_aen_cm);
818
819 dcmd = &cm->cm_frame->dcmd;
820 dcmd->header.flags = MFI_FRAME_DIR_NONE;
821
822 if ((error = mfi_polled_command(sc, cm)) != 0) {
823 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
824 }
825
826 mfi_release_command(cm);
827 return (error);
828 }
829
/*
 * Unmask the controller interrupt.
 * NOTE(review): writing 0x01 to OMSK presumably clears the interrupt
 * mask bit -- confirm against the register definitions in mfireg.h.
 */
static void
mfi_enable_intr(struct mfi_softc *sc)
{

	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
836
837 static void
838 mfi_ldprobe(struct mfi_softc *sc)
839 {
840 struct mfi_frame_header *hdr;
841 struct mfi_command *cm = NULL;
842 struct mfi_ld_list *list = NULL;
843 int error, i;
844
845 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
846 (void **)&list, sizeof(*list));
847 if (error)
848 goto out;
849
850 cm->cm_flags = MFI_CMD_DATAIN;
851 if (mfi_wait_command(sc, cm) != 0) {
852 device_printf(sc->mfi_dev, "Failed to get device listing\n");
853 goto out;
854 }
855
856 hdr = &cm->cm_frame->header;
857 if (hdr->cmd_status != MFI_STAT_OK) {
858 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
859 hdr->cmd_status);
860 goto out;
861 }
862
863 for (i = 0; i < list->ld_count; i++)
864 mfi_add_ld(sc, list->ld_list[i].ld.target_id);
865 out:
866 if (list)
867 free(list, M_MFIBUF);
868 if (cm)
869 mfi_release_command(cm);
870 return;
871 }
872
#ifdef NOTYET
/*
 * Decode a firmware log entry to the console.  Only the generic
 * fallback is implemented so far, hence the NOTYET guard.
 */
static void
mfi_decode_log(struct mfi_softc *sc, struct mfi_log_detail *detail)
{
	switch (detail->arg_type) {
	default:
		device_printf(sc->mfi_dev, "%d - Log entry type %d\n",
		    detail->seq,
		    detail->arg_type
		    );
		break;
	}
}
#endif
887
888 static void
889 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
890 {
891 switch (detail->arg_type) {
892 case MR_EVT_ARGS_NONE:
893 device_printf(sc->mfi_dev, "%d - %s\n",
894 detail->seq,
895 detail->description
896 );
897 break;
898 case MR_EVT_ARGS_CDB_SENSE:
899 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) CDB %*D"
900 "Sense %*D\n: %s\n",
901 detail->seq,
902 detail->args.cdb_sense.pd.device_id,
903 detail->args.cdb_sense.pd.enclosure_index,
904 detail->args.cdb_sense.pd.slot_number,
905 detail->args.cdb_sense.cdb_len,
906 detail->args.cdb_sense.cdb,
907 ":",
908 detail->args.cdb_sense.sense_len,
909 detail->args.cdb_sense.sense,
910 ":",
911 detail->description
912 );
913 break;
914 case MR_EVT_ARGS_LD:
915 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
916 "event: %s\n",
917 detail->seq,
918 detail->args.ld.ld_index,
919 detail->args.ld.target_id,
920 detail->description
921 );
922 break;
923 case MR_EVT_ARGS_LD_COUNT:
924 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
925 "count %lld: %s\n",
926 detail->seq,
927 detail->args.ld_count.ld.ld_index,
928 detail->args.ld_count.ld.target_id,
929 (long long)detail->args.ld_count.count,
930 detail->description
931 );
932 break;
933 case MR_EVT_ARGS_LD_LBA:
934 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
935 "lba %lld: %s\n",
936 detail->seq,
937 detail->args.ld_lba.ld.ld_index,
938 detail->args.ld_lba.ld.target_id,
939 (long long)detail->args.ld_lba.lba,
940 detail->description
941 );
942 break;
943 case MR_EVT_ARGS_LD_OWNER:
944 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
945 "owner changed: prior %d, new %d: %s\n",
946 detail->seq,
947 detail->args.ld_owner.ld.ld_index,
948 detail->args.ld_owner.ld.target_id,
949 detail->args.ld_owner.pre_owner,
950 detail->args.ld_owner.new_owner,
951 detail->description
952 );
953 break;
954 case MR_EVT_ARGS_LD_LBA_PD_LBA:
955 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
956 "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
957 detail->seq,
958 detail->args.ld_lba_pd_lba.ld.ld_index,
959 detail->args.ld_lba_pd_lba.ld.target_id,
960 (long long)detail->args.ld_lba_pd_lba.ld_lba,
961 detail->args.ld_lba_pd_lba.pd.device_id,
962 detail->args.ld_lba_pd_lba.pd.enclosure_index,
963 detail->args.ld_lba_pd_lba.pd.slot_number,
964 (long long)detail->args.ld_lba_pd_lba.pd_lba,
965 detail->description
966 );
967 break;
968 case MR_EVT_ARGS_LD_PROG:
969 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
970 "progress %d%% in %ds: %s\n",
971 detail->seq,
972 detail->args.ld_prog.ld.ld_index,
973 detail->args.ld_prog.ld.target_id,
974 detail->args.ld_prog.prog.progress/655,
975 detail->args.ld_prog.prog.elapsed_seconds,
976 detail->description
977 );
978 break;
979 case MR_EVT_ARGS_LD_STATE:
980 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
981 "state prior %d new %d: %s\n",
982 detail->seq,
983 detail->args.ld_state.ld.ld_index,
984 detail->args.ld_state.ld.target_id,
985 detail->args.ld_state.prev_state,
986 detail->args.ld_state.new_state,
987 detail->description
988 );
989 break;
990 case MR_EVT_ARGS_LD_STRIP:
991 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
992 "strip %lld: %s\n",
993 detail->seq,
994 detail->args.ld_strip.ld.ld_index,
995 detail->args.ld_strip.ld.target_id,
996 (long long)detail->args.ld_strip.strip,
997 detail->description
998 );
999 break;
1000 case MR_EVT_ARGS_PD:
1001 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1002 "event: %s\n",
1003 detail->seq,
1004 detail->args.pd.device_id,
1005 detail->args.pd.enclosure_index,
1006 detail->args.pd.slot_number,
1007 detail->description
1008 );
1009 break;
1010 case MR_EVT_ARGS_PD_ERR:
1011 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1012 "err %d: %s\n",
1013 detail->seq,
1014 detail->args.pd_err.pd.device_id,
1015 detail->args.pd_err.pd.enclosure_index,
1016 detail->args.pd_err.pd.slot_number,
1017 detail->args.pd_err.err,
1018 detail->description
1019 );
1020 break;
1021 case MR_EVT_ARGS_PD_LBA:
1022 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1023 "lba %lld: %s\n",
1024 detail->seq,
1025 detail->args.pd_lba.pd.device_id,
1026 detail->args.pd_lba.pd.enclosure_index,
1027 detail->args.pd_lba.pd.slot_number,
1028 (long long)detail->args.pd_lba.lba,
1029 detail->description
1030 );
1031 break;
1032 case MR_EVT_ARGS_PD_LBA_LD:
1033 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1034 "lba %lld VD %02d/%d: %s\n",
1035 detail->seq,
1036 detail->args.pd_lba_ld.pd.device_id,
1037 detail->args.pd_lba_ld.pd.enclosure_index,
1038 detail->args.pd_lba_ld.pd.slot_number,
1039 (long long)detail->args.pd_lba.lba,
1040 detail->args.pd_lba_ld.ld.ld_index,
1041 detail->args.pd_lba_ld.ld.target_id,
1042 detail->description
1043 );
1044 break;
1045 case MR_EVT_ARGS_PD_PROG:
1046 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1047 "progress %d%% seconds %ds: %s\n",
1048 detail->seq,
1049 detail->args.pd_prog.pd.device_id,
1050 detail->args.pd_prog.pd.enclosure_index,
1051 detail->args.pd_prog.pd.slot_number,
1052 detail->args.pd_prog.prog.progress/655,
1053 detail->args.pd_prog.prog.elapsed_seconds,
1054 detail->description
1055 );
1056 break;
1057 case MR_EVT_ARGS_PD_STATE:
1058 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1059 "state prior %d new %d: %s\n",
1060 detail->seq,
1061 detail->args.pd_prog.pd.device_id,
1062 detail->args.pd_prog.pd.enclosure_index,
1063 detail->args.pd_prog.pd.slot_number,
1064 detail->args.pd_state.prev_state,
1065 detail->args.pd_state.new_state,
1066 detail->description
1067 );
1068 break;
1069 case MR_EVT_ARGS_PCI:
1070 device_printf(sc->mfi_dev, "%d - PCI 0x04%x 0x04%x "
1071 "0x04%x 0x04%x: %s\n",
1072 detail->seq,
1073 detail->args.pci.venderId,
1074 detail->args.pci.deviceId,
1075 detail->args.pci.subVenderId,
1076 detail->args.pci.subDeviceId,
1077 detail->description
1078 );
1079 break;
1080 case MR_EVT_ARGS_RATE:
1081 device_printf(sc->mfi_dev, "%d - Rebuild rate %d: %s\n",
1082 detail->seq,
1083 detail->args.rate,
1084 detail->description
1085 );
1086 break;
1087 case MR_EVT_ARGS_TIME:
1088 device_printf(sc->mfi_dev, "%d - Adapter ticks %d "
1089 "elapsed %ds: %s\n",
1090 detail->seq,
1091 detail->args.time.rtc,
1092 detail->args.time.elapsedSeconds,
1093 detail->description
1094 );
1095 break;
1096 case MR_EVT_ARGS_ECC:
1097 device_printf(sc->mfi_dev, "%d - Adapter ECC %x,%x: %s: %s\n",
1098 detail->seq,
1099 detail->args.ecc.ecar,
1100 detail->args.ecc.elog,
1101 detail->args.ecc.str,
1102 detail->description
1103 );
1104 break;
1105 default:
1106 device_printf(sc->mfi_dev, "%d - Type %d: %s\n",
1107 detail->seq,
1108 detail->arg_type, detail->description
1109 );
1110 }
1111 }
1112
/*
 * Register (or re-register) an asynchronous event notification (AEN)
 * command with the firmware, starting at event sequence number 'seq'
 * and filtering on the class/locale word 'locale'.
 *
 * If an AEN command is already outstanding and already covers the
 * requested class and locale set, it is left in place; otherwise the
 * old command is aborted and a new one is issued.  Returns 0 on
 * success or an errno from mfi_dcmd_command().
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* mbox[1] of the pending command holds its class/locale. */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		/*
		 * If the pending AEN already listens at an equal or lower
		 * class and its locale mask is a superset of the requested
		 * one, there is nothing to do.
		 */
		if (prior_aen.members.class <= current_aen.members.class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			/*
			 * Merge the two requests and abort the pending
			 * command so it can be replaced.
			 * NOTE(review): the merged locale mask is stored in
			 * prior_aen, but the replacement command below is
			 * built from the caller's original 'locale'
			 * argument -- the merged mask appears to be
			 * discarded.  Verify against the firmware interface.
			 */
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.class
			    < current_aen.members.class)
				current_aen.members.class =
				    prior_aen.members.class;
			mfi_abort(sc, sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		return (error);

	/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

	return (0);
}
1158
1159 static void
1160 mfi_aen_complete(struct mfi_command *cm)
1161 {
1162 struct mfi_frame_header *hdr;
1163 struct mfi_softc *sc;
1164 struct mfi_evt_detail *detail;
1165 struct mfi_aen *mfi_aen_entry;
1166 int seq = 0, aborted = 0;
1167 int s;
1168
1169 sc = cm->cm_sc;
1170 hdr = &cm->cm_frame->header;
1171
1172 if (sc->mfi_aen_cm == NULL)
1173 return;
1174
1175 if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1176 sc->mfi_aen_cm->cm_aen_abort = 0;
1177 aborted = 1;
1178 } else {
1179 sc->mfi_aen_triggered = 1;
1180 if (sc->mfi_poll_waiting)
1181 selwakeup(&sc->mfi_select);
1182 detail = cm->cm_data;
1183 mfi_decode_evt(sc, detail);
1184 seq = detail->seq + 1;
1185 s = splbio();
1186 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1187 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1188 aen_link);
1189 psignal(mfi_aen_entry->p, SIGIO);
1190 free(mfi_aen_entry, M_MFIBUF);
1191 }
1192 splx(s);
1193 }
1194
1195 free(cm->cm_data, M_MFIBUF);
1196 sc->mfi_aen_cm = NULL;
1197 wakeup(&sc->mfi_aen_cm);
1198 mfi_release_command(cm);
1199
1200 /* set it up again so the driver can catch more events */
1201 if (!aborted) {
1202 mfi_aen_setup(sc, seq);
1203 }
1204 }
1205
1206 #ifdef NOTYET
1207 static int
1208 mfi_get_entry(struct mfi_softc *sc, int seq)
1209 {
1210 struct mfi_command *cm;
1211 struct mfi_dcmd_frame *dcmd;
1212 struct mfi_log_detail *ed;
1213 int error;
1214
1215 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1216 return (EBUSY);
1217 }
1218
1219 ed = malloc(sizeof(struct mfi_log_detail), M_MFIBUF, M_NOWAIT | M_ZERO);
1220 if (ed == NULL) {
1221 mfi_release_command(cm);
1222 return (ENOMEM);
1223 }
1224
1225 dcmd = &cm->cm_frame->dcmd;
1226 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1227 dcmd->header.cmd = MFI_CMD_DCMD;
1228 dcmd->header.timeout = 0;
1229 dcmd->header.data_len = sizeof(struct mfi_log_detail);
1230 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1231 ((uint32_t *)&dcmd->mbox)[0] = seq;
1232 ((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1233 cm->cm_sg = &dcmd->sgl;
1234 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1235 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1236 cm->cm_data = ed;
1237 cm->cm_len = sizeof(struct mfi_evt_detail);
1238
1239 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1240 device_printf(sc->mfi_dev, "Controller info buffer map failed");
1241 free(ed, M_MFIBUF);
1242 mfi_release_command(cm);
1243 return (error);
1244 }
1245
1246 if ((error = mfi_polled_command(sc, cm)) != 0) {
1247 device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1248 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
1249 MFI_SECTOR_LEN;
1250 free(ed, M_MFIBUF);
1251 mfi_release_command(cm);
1252 return (0);
1253 }
1254
1255 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1256 BUS_DMASYNC_POSTREAD);
1257 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1258
1259 mfi_decode_log(sc, ed);
1260
1261 free(cm->cm_data, M_MFIBUF);
1262 mfi_release_command(cm);
1263 return (0);
1264 }
1265 #endif
1266
1267 static int
1268 mfi_add_ld(struct mfi_softc *sc, int id)
1269 {
1270 struct mfi_command *cm;
1271 struct mfi_dcmd_frame *dcmd = NULL;
1272 struct mfi_ld_info *ld_info = NULL;
1273 int error;
1274
1275 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1276 (void **)&ld_info, sizeof(*ld_info));
1277 if (error) {
1278 device_printf(sc->mfi_dev,
1279 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1280 if (ld_info)
1281 free(ld_info, M_MFIBUF);
1282 return (error);
1283 }
1284 cm->cm_flags = MFI_CMD_DATAIN;
1285 dcmd = &cm->cm_frame->dcmd;
1286 dcmd->mbox[0] = id;
1287 if (mfi_wait_command(sc, cm) != 0) {
1288 device_printf(sc->mfi_dev,
1289 "Failed to get logical drive: %d\n", id);
1290 free(ld_info, M_MFIBUF);
1291 return (0);
1292 }
1293
1294 mfi_add_ld_complete(cm);
1295 return (0);
1296 }
1297
1298 static void
1299 mfi_add_ld_complete(struct mfi_command *cm)
1300 {
1301 struct mfi_frame_header *hdr;
1302 struct mfi_ld_info *ld_info;
1303 struct mfi_softc *sc;
1304 struct mfi_ld *ld;
1305 device_t child;
1306
1307 sc = cm->cm_sc;
1308 hdr = &cm->cm_frame->header;
1309 ld_info = cm->cm_private;
1310
1311 if (hdr->cmd_status != MFI_STAT_OK) {
1312 free(ld_info, M_MFIBUF);
1313 mfi_release_command(cm);
1314 return;
1315 }
1316 mfi_release_command(cm);
1317
1318 ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1319 if (ld == NULL) {
1320 device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1321 free(ld_info, M_MFIBUF);
1322 return;
1323 }
1324
1325 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1326 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1327 free(ld, M_MFIBUF);
1328 free(ld_info, M_MFIBUF);
1329 return;
1330 }
1331
1332 ld->ld_id = ld_info->ld_config.properties.ld.target_id;
1333 ld->ld_disk = child;
1334 ld->ld_info = ld_info;
1335
1336 device_set_ivars(child, ld);
1337 device_set_desc(child, "MFI Logical Disk");
1338 bus_generic_attach(sc->mfi_dev);
1339 }
1340
1341 static struct mfi_command *
1342 mfi_bio_command(struct mfi_softc *sc)
1343 {
1344 struct mfi_io_frame *io;
1345 struct mfi_command *cm;
1346 struct bio *bio;
1347 int flags, blkcount;
1348
1349 if ((cm = mfi_dequeue_free(sc)) == NULL)
1350 return (NULL);
1351
1352 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1353 mfi_release_command(cm);
1354 return (NULL);
1355 }
1356
1357 io = &cm->cm_frame->io;
1358 if (BIO_IS_READ(bio)) {
1359 io->header.cmd = MFI_CMD_LD_READ;
1360 flags = MFI_CMD_DATAIN;
1361 } else {
1362 io->header.cmd = MFI_CMD_LD_WRITE;
1363 flags = MFI_CMD_DATAOUT;
1364 }
1365
1366 /* Cheat with the sector length to avoid a non-constant division */
1367 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1368 io->header.target_id = (uintptr_t)bio->bio_driver1;
1369 io->header.timeout = 0;
1370 io->header.flags = 0;
1371 io->header.sense_len = MFI_SENSE_LEN;
1372 io->header.data_len = blkcount;
1373 io->sense_addr_lo = cm->cm_sense_busaddr;
1374 io->sense_addr_hi = 0;
1375 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1376 io->lba_lo = bio->bio_pblkno & 0xffffffff;
1377 cm->cm_complete = mfi_bio_complete;
1378 cm->cm_private = bio;
1379 cm->cm_data = bio->bio_data;
1380 cm->cm_len = bio->bio_bcount;
1381 cm->cm_sg = &io->sgl;
1382 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1383 cm->cm_flags = flags;
1384
1385 return (cm);
1386 }
1387
1388 static void
1389 mfi_bio_complete(struct mfi_command *cm)
1390 {
1391 struct bio *bio;
1392 struct mfi_frame_header *hdr;
1393 struct mfi_softc *sc;
1394
1395 bio = cm->cm_private;
1396 hdr = &cm->cm_frame->header;
1397 sc = cm->cm_sc;
1398
1399 if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1400 bio->bio_flags |= BIO_ERROR;
1401 bio->bio_error = EIO;
1402 device_printf(sc->mfi_dev, "I/O error, status= %d "
1403 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1404 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1405 }
1406
1407 mfi_release_command(cm);
1408 mfi_disk_complete(bio);
1409 }
1410
1411 void
1412 mfi_startio(struct mfi_softc *sc)
1413 {
1414 struct mfi_command *cm;
1415
1416 for (;;) {
1417 /* Don't bother if we're short on resources */
1418 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1419 break;
1420
1421 /* Try a command that has already been prepared */
1422 cm = mfi_dequeue_ready(sc);
1423
1424 /* Nope, so look for work on the bioq */
1425 if (cm == NULL)
1426 cm = mfi_bio_command(sc);
1427
1428 /* No work available, so exit */
1429 if (cm == NULL)
1430 break;
1431
1432 /* Send the command to the controller */
1433 if (mfi_mapcmd(sc, cm) != 0) {
1434 mfi_requeue_ready(cm);
1435 break;
1436 }
1437 }
1438 }
1439
1440 static int
1441 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1442 {
1443 int error, polled;
1444
1445 if (cm->cm_data != NULL) {
1446 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1447 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1448 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1449 if (error == EINPROGRESS) {
1450 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1451 return (0);
1452 }
1453 } else {
1454 mfi_enqueue_busy(cm);
1455 error = mfi_send_frame(sc, cm);
1456 }
1457
1458 return (error);
1459 }
1460
/*
 * bus_dmamap_load() callback: fill in the frame's scatter/gather list
 * from the DMA segments, sync the data buffer, account for the extra
 * frames the S/G list occupies, and (for non-polled commands) hand the
 * frame to the controller.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, dir;

	/* On a load error there is no mapping to tear down. */
	if (error)
		return;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/* Use 32-bit or 64-bit S/G entries per controller capability. */
	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg32[i].addr = segs[i].ds_addr;
			sgl->sg32[i].len = segs[i].ds_len;
		}
	} else {
		for (i = 0; i < nsegs; i++) {
			sgl->sg64[i].addr = segs[i].ds_addr;
			sgl->sg64[i].len = segs[i].ds_len;
		}
		hdr->flags |= MFI_FRAME_SGL64;
	}
	hdr->sg_count = nsegs;

	/* Mirror the data direction into both the sync op and the frame. */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	/* The caller will take care of delivering polled commands */
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		mfi_enqueue_busy(cm);
		mfi_send_frame(sc, cm);
	}

	return;
}
1521
1522 static int
1523 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1524 {
1525 int s;
1526
1527 /*
1528 * The bus address of the command is aligned on a 64 byte boundary,
1529 * leaving the least 6 bits as zero. For whatever reason, the
1530 * hardware wants the address shifted right by three, leaving just
1531 * 3 zero bits. These three bits are then used to indicate how many
1532 * 64 byte frames beyond the first one are used in the command. The
1533 * extra frames are typically filled with S/G elements. The extra
1534 * frames must also be contiguous. Thus, a compound frame can be at
1535 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1536 * 39 64-bit S/G elements for block I/O commands. This means that
1537 * I/O transfers of 256k and higher simply are not possible, which
1538 * is quite odd for such a modern adapter.
1539 */
1540 s = splbio();
1541 MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1542 cm->cm_extra_frames);
1543 splx(s);
1544 return (0);
1545 }
1546
1547 static void
1548 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1549 {
1550 int dir;
1551
1552 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1553 dir = 0;
1554 if (cm->cm_flags & MFI_CMD_DATAIN)
1555 dir |= BUS_DMASYNC_POSTREAD;
1556 if (cm->cm_flags & MFI_CMD_DATAOUT)
1557 dir |= BUS_DMASYNC_POSTWRITE;
1558
1559 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1560 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1561 cm->cm_flags &= ~MFI_CMD_MAPPED;
1562 }
1563
1564 if (cm->cm_complete != NULL)
1565 cm->cm_complete(cm);
1566 else
1567 wakeup(cm);
1568
1569 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1570 mfi_startio(sc);
1571 }
1572
/*
 * Issue an MFI_CMD_ABORT frame for 'cm_abort', wait (polled) for the
 * abort itself to complete, then sleep until the aborted command's
 * completion handler clears sc->mfi_aen_cm.
 *
 * NOTE(review): this routine unconditionally sets
 * sc->mfi_aen_cm->cm_aen_abort and waits on sc->mfi_aen_cm, so it is
 * only usable for aborting the outstanding AEN command -- confirm
 * before reusing it for any other command.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Identify the victim by its context and frame bus address. */
	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi = 0;
	cm->cm_data = NULL;

	sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_polled_command(sc, cm);
	mfi_release_command(cm);

	/* Wait for mfi_aen_complete() to clear the pending AEN command. */
	while (sc->mfi_aen_cm != NULL) {
		tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
	}

	return (0);
}
1602
1603 int
1604 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1605 {
1606 struct mfi_command *cm;
1607 struct mfi_io_frame *io;
1608 int error;
1609
1610 if ((cm = mfi_dequeue_free(sc)) == NULL)
1611 return (EBUSY);
1612
1613 io = &cm->cm_frame->io;
1614 io->header.cmd = MFI_CMD_LD_WRITE;
1615 io->header.target_id = id;
1616 io->header.timeout = 0;
1617 io->header.flags = 0;
1618 io->header.sense_len = MFI_SENSE_LEN;
1619 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1620 io->sense_addr_lo = cm->cm_sense_busaddr;
1621 io->sense_addr_hi = 0;
1622 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1623 io->lba_lo = lba & 0xffffffff;
1624 cm->cm_data = virt;
1625 cm->cm_len = len;
1626 cm->cm_sg = &io->sgl;
1627 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1628 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1629
1630 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1631 mfi_release_command(cm);
1632 return (error);
1633 }
1634
1635 error = mfi_polled_command(sc, cm);
1636 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1637 BUS_DMASYNC_POSTWRITE);
1638 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1639 mfi_release_command(cm);
1640
1641 return (error);
1642 }
1643
1644 static int
1645 mfi_open(dev_t dev, int flags, int fmt, d_thread_t *td)
1646 {
1647 struct mfi_softc *sc;
1648
1649 sc = dev->si_drv1;
1650 sc->mfi_flags |= MFI_FLAGS_OPEN;
1651
1652 return (0);
1653 }
1654
1655 static int
1656 mfi_close(dev_t dev, int flags, int fmt, d_thread_t *td)
1657 {
1658 struct mfi_softc *sc;
1659 struct mfi_aen *mfi_aen_entry;
1660 int s;
1661
1662 sc = dev->si_drv1;
1663 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1664
1665 s = splbio();
1666 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1667 if (mfi_aen_entry->p == curproc) {
1668 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1669 aen_link);
1670 free(mfi_aen_entry, M_MFIBUF);
1671 }
1672 }
1673 splx(s);
1674 return (0);
1675 }
1676
/*
 * Character-device ioctl entry point.  Only MFIIO_STATS is currently
 * implemented; it copies one of the driver's queue-statistics
 * structures out to the caller.  The Linux ioctl shims below are
 * compiled out under 'notyet'.
 */
static int
mfi_ioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	int error;

	sc = dev->si_drv1;
	error = 0;

	switch (cmd) {
	case MFIIO_STATS:
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			/* Snapshot the requested queue's statistics. */
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;

#ifdef notyet
	case 0xc1144d01: /* Firmware Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_packet l_ioc;
		int adapter;

		/* Re-dispatch to the adapter named in the request. */
		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);
		adapter = l_ioc.lioc_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		break;
		}
	case 0x400c4d03: /* AEN Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_aen l_aen;
		int adapter;

		/* Re-dispatch to the adapter named in the request. */
		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		adapter = l_aen.laen_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		break;
		}
#endif
	default:
		error = ENOENT;
		break;
	}

	return (error);
}
1755
1756 #ifdef notyet
/*
 * Back end for the Linux management-tool ioctl shims (compiled out
 * under 'notyet').  0xc1144d01 passes a raw firmware frame through:
 * the user frame and its scatter/gather payload are copied in, the
 * command is run polled, and the data, sense, and status are copied
 * back out.  0x400c4d03 registers the calling process for AEN
 * delivery (SIGIO) and (re)arms the AEN command.
 */
static int
mfi_linux_ioctl_int(dev_t dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	uint32_t *sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	int i;
	int s;
	int error;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case 0xc1144d01: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			return (EBUSY);
		}

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		/* Copy the user's frame up to (not including) its SGL. */
		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		    l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
		cm->cm_sg =
		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
		    | MFI_CMD_POLLED;
		cm->cm_len = cm->cm_frame->header.data_len;
		/* Bounce buffer that gathers all the user S/G pieces. */
		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
		    M_WAITOK | M_ZERO);

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's scatter list into the bounce buffer. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			error = copyin(l_ioc.lioc_sgl[i].iov_base,
			    temp,
			    l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy in failed");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		/* Point the frame's sense slot at our DMA sense buffer. */
		if (l_ioc.lioc_sense_len) {
			sense_ptr =
			    (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
			*sense_ptr = cm->cm_sense_busaddr;
		}

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller info buffer map failed");
			goto out;
		}

		if ((error = mfi_polled_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed");
			goto out;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		/* Scatter the bounce buffer back to the user's iovecs. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			error = copyout(temp,
			    l_ioc.lioc_sgl[i].iov_base,
			    l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		if (l_ioc.lioc_sense_len) {
			/* copy out sense */
			sense_ptr = (void *)
			    &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
			/*
			 * NOTE(review): 'temp' is built here from the sense
			 * buffer's *bus* address and then used as a kernel
			 * virtual source for copyout(); that is only valid
			 * if the two address spaces coincide.  Verify
			 * before enabling this code.
			 */
			temp = 0;
			temp += cm->cm_sense_busaddr;
			error = copyout(temp, sense_ptr,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed");
				goto out;
			}
		}

		/* Hand the final command status byte back to the caller. */
		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet*)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed");
			goto out;
		}

out:
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mfi_release_command(cm);
		}

		return (error);
	case 0x400c4d03: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			s = splbio();
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			splx(s);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		/* Registration failed; forget the process again. */
		if (error != 0) {
			s = splbio();
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			splx(s);
			free(mfi_aen_entry, M_MFIBUF);
		}

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
1923 #endif
1924
1925 static int
1926 mfi_poll(dev_t dev, int poll_events, d_thread_t *td)
1927 {
1928 struct mfi_softc *sc;
1929 int revents = 0;
1930
1931 sc = dev->si_drv1;
1932
1933 if (poll_events & (POLLIN | POLLRDNORM)) {
1934 if (sc->mfi_aen_triggered != 0)
1935 revents |= poll_events & (POLLIN | POLLRDNORM);
1936 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
1937 revents |= POLLERR;
1938 }
1939 }
1940
1941 if (revents == 0) {
1942 if (poll_events & (POLLIN | POLLRDNORM)) {
1943 sc->mfi_poll_waiting = 1;
1944 selrecord(td, &sc->mfi_select);
1945 sc->mfi_poll_waiting = 0;
1946 }
1947 }
1948
1949 return revents;
1950 }
Cache object: 3f57e7964259bba108ae26476f58ef30
|