/*
 * FreeBSD/Linux Kernel Cross Reference
 * sys/dev/mfi/mfi.c
 */
1 /*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/6.1/sys/dev/mfi/mfi.c 157460 2006-04-04 03:24:49Z scottl $");
29
30 #include "opt_mfi.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/conf.h>
38 #include <sys/eventhandler.h>
39 #include <sys/rman.h>
40 #include <sys/bus_dma.h>
41 #include <sys/bio.h>
42 #include <sys/ioccom.h>
43
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46
47 #include <dev/mfi/mfireg.h>
48 #include <dev/mfi/mfi_ioctl.h>
49 #include <dev/mfi/mfivar.h>
50
51 static int mfi_alloc_commands(struct mfi_softc *);
52 static void mfi_release_command(struct mfi_command *cm);
53 static int mfi_comms_init(struct mfi_softc *);
54 static int mfi_polled_command(struct mfi_softc *, struct mfi_command *);
55 static int mfi_get_controller_info(struct mfi_softc *);
56 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
57 static void mfi_startup(void *arg);
58 static void mfi_intr(void *arg);
59 static void mfi_enable_intr(struct mfi_softc *sc);
60 static void mfi_ldprobe_inq(struct mfi_softc *sc);
61 static void mfi_ldprobe_inq_complete(struct mfi_command *);
62 static int mfi_ldprobe_capacity(struct mfi_softc *sc, int id);
63 static void mfi_ldprobe_capacity_complete(struct mfi_command *);
64 static int mfi_ldprobe_tur(struct mfi_softc *sc, int id);
65 static void mfi_ldprobe_tur_complete(struct mfi_command *);
66 static int mfi_add_ld(struct mfi_softc *sc, int id, uint64_t, uint32_t);
67 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
68 static void mfi_bio_complete(struct mfi_command *);
69 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
70 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
71 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
72
73 /* Management interface */
74 static d_open_t mfi_open;
75 static d_close_t mfi_close;
76 static d_ioctl_t mfi_ioctl;
77
/*
 * Character-device switch for the /dev/mfi%d management node created in
 * mfi_attach(); only open, close, and ioctl are implemented.
 */
static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_name =	"mfi",
};
86
87 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
88
89 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
90
/*
 * Step the adapter firmware toward the READY state, performing whatever
 * handshake each intermediate state requires.  Polls the outbound message
 * register (MFI_OMSG0) and returns 0 once the firmware reports READY, or
 * ENXIO on a firmware fault, an unknown state, or lack of progress.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* Acknowledge the handshake request */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask operational firmware to transition to READY */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = 10;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_DEVICE_SCAN:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll in 100ms steps for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
144
/*
 * bus_dma callback that records the bus address of a single-segment
 * mapping into a uint32_t.  Note this truncates ds_addr to 32 bits;
 * callers restrict the tags involved to 32-bit addresses.
 */
static void
mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
153
/*
 * Primary attach routine.  Brings the firmware to the READY state, sizes
 * and allocates the DMA regions (comms queues, command frames, sense
 * buffers), allocates command structures, initializes the firmware comms
 * interface, sets up the interrupt handler, registers startup/shutdown
 * hooks, and creates the management device node.
 *
 * Returns 0 on success or an errno on failure.
 * NOTE(review): resources allocated before a mid-function failure are not
 * unwound here; presumably the bus-attach caller invokes mfi_free() on
 * error -- verify against mfi_pci.c.
 */
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit;

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->mfi_ld_tqh);

	/* Initialize the free, ready, busy, and bio command queues */
	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = MFI_READ4(sc, MFI_OMSG0);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));

	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_total_sgl,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* msegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the frames at
	 * the same time; the frame is 64 bytes plus space for the SG lists.
	 * The assumption here is that the SG list will start at the second
	 * 64 byte segment of the frame and not use the unused bytes in the
	 * frame.  While this might seem wasteful, apparently the frames must
	 * be 64 byte aligned, so any savings would be negated by the extra
	 * alignment padding.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sgsize = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sgsize = sizeof(struct mfi_sg32);
	}
	frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);

	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);

	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	if ((error = mfi_comms_init(sc)) != 0)
		return (error);

	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);

#if 0
	if ((error = mfi_setup_aen(sc)) != 0)
		return (error);
#endif

	/*
	 * Set up the interrupt handler.  XXX This should happen in
	 * mfi_pci.c
	 */
	sc->mfi_irq_rid = 0;
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		return (EINVAL);
	}
	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
	    mfi_intr, sc, &sc->mfi_intr)) {
		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;

	return (0);
}
368
/*
 * Allocate the driver's command array and carve the pre-allocated frame
 * and sense DMA regions into per-command slices.  Each command that gets
 * a buffer dmamap is placed on the free queue via mfi_release_command().
 * Always returns 0; a dmamap-create failure simply stops the loop, leaving
 * sc->mfi_total_cmds at however many commands were successfully set up.
 */
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = sc->mfi_max_fw_cmds;
	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		/* Point each command at its slice of the frame region */
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_frame_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_frame_size * i;
		/* The context is the command's index; used to find it again */
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0)
			mfi_release_command(cm);
		else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}
403
404 static void
405 mfi_release_command(struct mfi_command *cm)
406 {
407 uint32_t *hdr_data;
408
409 /*
410 * Zero out the important fields of the frame, but make sure the
411 * context field is preserved
412 */
413 hdr_data = (uint32_t *)cm->cm_frame;
414 hdr_data[0] = 0;
415 hdr_data[1] = 0;
416
417 cm->cm_extra_frames = 0;
418 cm->cm_flags = 0;
419 cm->cm_complete = NULL;
420 cm->cm_private = NULL;
421 cm->cm_sg = 0;
422 cm->cm_total_frame_size = 0;
423 mfi_enqueue_free(cm);
424 }
425
/*
 * Send the INIT frame that tells the firmware where the reply queue and
 * the producer/consumer indices live.  The queue-info structure is placed
 * in the second 64-byte segment of the command's own frame, so no separate
 * DMA buffer is needed.  Returns 0 on success or an errno.
 */
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	/* One more entry than commands; see the sizing note in mfi_attach() */
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	/* qinfo physically follows the frame in the same DMA slice */
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;

	if ((error = mfi_polled_command(sc, cm)) != 0) {
		/*
		 * NOTE(review): cm is not released on this path, stranding
		 * one command.  Attach fails anyway, but confirm this is
		 * intentional (the firmware may still own the frame).
		 */
		device_printf(sc->mfi_dev, "failed to send init command\n");
		return (error);
	}
	mfi_release_command(cm);

	return (0);
}
465
/*
 * Issue the CTRL_GETINFO DCMD to learn the controller's maximum I/O size
 * (in sectors), stored in sc->mfi_max_io.  A failed command is not fatal:
 * a conservative default derived from the SG list size is used instead.
 * Returns 0 on success (including the fallback case) or an errno if the
 * command could not even be set up.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_ctrl_info *ci;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	ci = malloc(sizeof(struct mfi_ctrl_info), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ci == NULL) {
		mfi_release_command(cm);
		return (ENOMEM);
	}

	/* Build a polled DATAIN DCMD that reads the info struct */
	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = sizeof(struct mfi_ctrl_info);
	dcmd->opcode = MFI_DCMD_CTRL_GETINFO;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_data = ci;
	cm->cm_len = sizeof(struct mfi_ctrl_info);

	/* Map the buffer; POLLED means the callback won't send the frame */
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Controller info buffer map failed");
		free(ci, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}

	/* It's ok if this fails, just use default info instead */
	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		free(ci, M_MFIBUF);
		mfi_release_command(cm);
		return (0);
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Max I/O is the smaller of the stripe-derived and absolute limits */
	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

	free(ci, M_MFIBUF);
	mfi_release_command(cm);

	return (error);
}
526
/*
 * Send a frame and busy-wait (1ms steps, up to MFI_POLL_TIMEOUT_SECS) for
 * the firmware to overwrite the 0xff sentinel in cmd_status.  The frame is
 * flagged so the firmware does not also post it to the reply queue.
 * Returns 0 on completion or ETIMEDOUT.  Caller examines cmd_status for
 * the actual command result and still owns the command either way.
 */
static int
mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000000;

	hdr = &cm->cm_frame->header;
	hdr->cmd_status = 0xff;		/* sentinel: firmware will change it */
	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_send_frame(sc, cm);

	while (hdr->cmd_status == 0xff) {
		DELAY(1000);
		tm -= 1000;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == 0xff) {
		device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
		return (ETIMEDOUT);
	}

	return (0);
}
553
/*
 * Tear down everything mfi_attach() set up, in reverse order: device node,
 * per-command dmamaps and the command array, the interrupt, then each DMA
 * region (unload map, free memory, destroy tag), and finally the tags and
 * the I/O mutex.  Safe to call on a partially attached softc; every step
 * checks that its resource was actually allocated.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Sense buffers */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	/* Command frames */
	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	/* Comms queues */
	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock))
		mtx_destroy(&sc->mfi_io_lock);

	return;
}
611
612 static void
613 mfi_startup(void *arg)
614 {
615 struct mfi_softc *sc;
616
617 sc = (struct mfi_softc *)arg;
618
619 config_intrhook_disestablish(&sc->mfi_ich);
620
621 mfi_enable_intr(sc);
622 mfi_ldprobe_inq(sc);
623 }
624
/*
 * Interrupt handler.  Acknowledges the interrupt, then walks the reply
 * queue from the consumer index to the producer index, completing each
 * command identified by the context value (the command's array index).
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t status, pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Bail if the interrupt wasn't ours (shared IRQ), else ack it */
	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return;
	MFI_WRITE4(sc, MFI_OSTS, status);

	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;

	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		/* Consume the entry, poisoning it so reuse is detectable */
		context = sc->mfi_comms->hw_reply_q[ci];
		sc->mfi_comms->hw_reply_q[ci] = 0xffffffff;
		if (context == 0xffffffff) {
			device_printf(sc->mfi_dev, "mfi_intr: invalid context "
			    "pi= %d ci= %d\n", pi, ci);
		} else {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			mfi_complete(sc, cm);
		}
		/* The reply queue has mfi_max_fw_cmds + 1 entries; wrap */
		ci++;
		if (ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * NOTE(review): the consumer index is published after dropping the
	 * lock; verify no other path updates hw_ci concurrently.
	 */
	sc->mfi_comms->hw_ci = ci;

	return;
}
665
/*
 * Final-shutdown handler: issue a polled CTRL_SHUTDOWN DCMD so the
 * controller flushes its caches before power-off.  Returns 0 on success
 * or an errno.
 *
 * NOTE(review): cm is never released here; since the system is going down
 * this leaks nothing that matters, but confirm it is deliberate.
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* AEN? */

	/* A dataless DCMD: no SG list, no payload */
	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.sg_count = 0;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = 0;
	dcmd->opcode = MFI_DCMD_CTRL_SHUTDOWN;

	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	return (error);
}
693
/*
 * Unmask controller interrupts by writing 0x01 to the outbound interrupt
 * mask register (MFI_OMSK); exact bit semantics are hardware-defined.
 */
static void
mfi_enable_intr(struct mfi_softc *sc)
{

	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
700
701 static void
702 mfi_ldprobe_inq(struct mfi_softc *sc)
703 {
704 struct mfi_command *cm;
705 struct mfi_pass_frame *pass;
706 char *inq;
707 int i;
708
709 /* Probe all possible targets with a SCSI INQ command */
710 mtx_lock(&sc->mfi_io_lock);
711 sc->mfi_probe_count = 0;
712 for (i = 0; i < MFI_MAX_CHANNEL_DEVS; i++) {
713 inq = malloc(MFI_INQ_LENGTH, M_MFIBUF, M_NOWAIT|M_ZERO);
714 if (inq == NULL)
715 break;
716 cm = mfi_dequeue_free(sc);
717 if (cm == NULL) {
718 tsleep(mfi_startup, 0, "mfistart", 5 * hz);
719 i--;
720 continue;
721 }
722 pass = &cm->cm_frame->pass;
723 pass->header.cmd = MFI_CMD_LD_SCSI_IO;
724 pass->header.target_id = i;
725 pass->header.lun_id = 0;
726 pass->header.cdb_len = 6;
727 pass->header.timeout = 0;
728 pass->header.data_len = MFI_INQ_LENGTH;
729 bzero(pass->cdb, 16);
730 pass->cdb[0] = INQUIRY;
731 pass->cdb[4] = MFI_INQ_LENGTH;
732 pass->header.sense_len = MFI_SENSE_LEN;
733 pass->sense_addr_lo = cm->cm_sense_busaddr;
734 pass->sense_addr_hi = 0;
735 cm->cm_complete = mfi_ldprobe_inq_complete;
736 cm->cm_private = inq;
737 cm->cm_sg = &pass->sgl;
738 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
739 cm->cm_flags |= MFI_CMD_DATAIN;
740 cm->cm_data = inq;
741 cm->cm_len = MFI_INQ_LENGTH;
742 sc->mfi_probe_count++;
743 mfi_enqueue_ready(cm);
744 mfi_startio(sc);
745 }
746
747 /* Sleep while the arrays are attaching */
748 msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart", 60 * hz);
749 mtx_unlock(&sc->mfi_io_lock);
750
751 return;
752 }
753
754 static void
755 mfi_ldprobe_inq_complete(struct mfi_command *cm)
756 {
757 struct mfi_frame_header *hdr;
758 struct mfi_softc *sc;
759 struct scsi_inquiry_data *inq;
760
761 sc = cm->cm_sc;
762 inq = cm->cm_private;
763 hdr = &cm->cm_frame->header;
764
765 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00) ||
766 (SID_TYPE(inq) != T_DIRECT)) {
767 free(inq, M_MFIBUF);
768 mfi_release_command(cm);
769 if (--sc->mfi_probe_count <= 0)
770 wakeup(mfi_startup);
771 return;
772 }
773
774 free(inq, M_MFIBUF);
775 mfi_release_command(cm);
776 mfi_ldprobe_tur(sc, hdr->target_id);
777 }
778
/*
 * Second stage of the LD probe chain: issue a dataless TEST UNIT READY
 * to the given target.  Returns 0 if the command was queued, EBUSY if no
 * free command was available.  Completion continues in
 * mfi_ldprobe_tur_complete().
 */
static int
mfi_ldprobe_tur(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);
	/* 6-byte TUR CDB; no data phase, so no SG list is set up */
	pass = &cm->cm_frame->pass;
	pass->header.cmd = MFI_CMD_LD_SCSI_IO;
	pass->header.target_id = id;
	pass->header.lun_id = 0;
	pass->header.cdb_len = 6;
	pass->header.timeout = 0;
	pass->header.data_len = 0;
	bzero(pass->cdb, 16);
	pass->cdb[0] = TEST_UNIT_READY;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->sense_addr_lo = cm->cm_sense_busaddr;
	pass->sense_addr_hi = 0;
	cm->cm_complete = mfi_ldprobe_tur_complete;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = 0;
	mfi_enqueue_ready(cm);
	mfi_startio(sc);

	return (0);
}
808
809 static void
810 mfi_ldprobe_tur_complete(struct mfi_command *cm)
811 {
812 struct mfi_frame_header *hdr;
813 struct mfi_softc *sc;
814
815 sc = cm->cm_sc;
816 hdr = &cm->cm_frame->header;
817
818 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
819 device_printf(sc->mfi_dev, "Logical disk %d is not ready, "
820 "cmd_status= %d scsi_status= %d\n", hdr->target_id,
821 hdr->cmd_status, hdr->scsi_status);
822 mfi_print_sense(sc, cm->cm_sense);
823 mfi_release_command(cm);
824 if (--sc->mfi_probe_count <= 0)
825 wakeup(mfi_startup);
826 return;
827 }
828 mfi_release_command(cm);
829 mfi_ldprobe_capacity(sc, hdr->target_id);
830 }
831
832 static int
833 mfi_ldprobe_capacity(struct mfi_softc *sc, int id)
834 {
835 struct mfi_command *cm;
836 struct mfi_pass_frame *pass;
837 struct scsi_read_capacity_data_long *cap;
838
839 cap = malloc(sizeof(*cap), M_MFIBUF, M_NOWAIT|M_ZERO);
840 if (cap == NULL)
841 return (ENOMEM);
842 cm = mfi_dequeue_free(sc);
843 if (cm == NULL)
844 return (EBUSY);
845 pass = &cm->cm_frame->pass;
846 pass->header.cmd = MFI_CMD_LD_SCSI_IO;
847 pass->header.target_id = id;
848 pass->header.lun_id = 0;
849 pass->header.cdb_len = 6;
850 pass->header.timeout = 0;
851 pass->header.data_len = sizeof(*cap);
852 bzero(pass->cdb, 16);
853 pass->cdb[0] = 0x9e; /* READ CAPACITY 16 */
854 pass->cdb[13] = sizeof(*cap);
855 pass->header.sense_len = MFI_SENSE_LEN;
856 pass->sense_addr_lo = cm->cm_sense_busaddr;
857 pass->sense_addr_hi = 0;
858 cm->cm_complete = mfi_ldprobe_capacity_complete;
859 cm->cm_private = cap;
860 cm->cm_sg = &pass->sgl;
861 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
862 cm->cm_flags |= MFI_CMD_DATAIN;
863 cm->cm_data = cap;
864 cm->cm_len = sizeof(*cap);
865 mfi_enqueue_ready(cm);
866 mfi_startio(sc);
867
868 return (0);
869 }
870
/*
 * Final stage of the LD probe chain: decode the READ CAPACITY(16) reply
 * and register the logical disk via mfi_add_ld().  On either path the
 * probe count is decremented and the prober is woken when it hits zero.
 * Note the target id is saved *before* mfi_release_command() clears the
 * frame header.
 */
static void
mfi_ldprobe_capacity_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct scsi_read_capacity_data_long *cap;
	uint64_t sectors;
	uint32_t secsize;
	int target;

	sc = cm->cm_sc;
	cap = cm->cm_private;
	hdr = &cm->cm_frame->header;

	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
		device_printf(sc->mfi_dev, "Failed to read capacity for "
		    "logical disk\n");
		device_printf(sc->mfi_dev, "cmd_status= %d scsi_status= %d\n",
		    hdr->cmd_status, hdr->scsi_status);
		free(cap, M_MFIBUF);
		mfi_release_command(cm);
		if (--sc->mfi_probe_count <= 0)
			wakeup(mfi_startup);
		return;
	}
	target = hdr->target_id;
	/* Big-endian SCSI fields: 8-byte LBA count, 4-byte block length */
	sectors = scsi_8btou64(cap->addr);
	secsize = scsi_4btoul(cap->length);
	free(cap, M_MFIBUF);
	mfi_release_command(cm);
	mfi_add_ld(sc, target, sectors, secsize);
	if (--sc->mfi_probe_count <= 0)
		wakeup(mfi_startup);

	return;
}
907
908 static int
909 mfi_add_ld(struct mfi_softc *sc, int id, uint64_t sectors, uint32_t secsize)
910 {
911 struct mfi_ld *ld;
912 device_t child;
913
914 if ((secsize == 0) || (sectors == 0)) {
915 device_printf(sc->mfi_dev, "Invalid capacity parameters for "
916 "logical disk %d\n", id);
917 return (EINVAL);
918 }
919
920 ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
921 if (ld == NULL) {
922 device_printf(sc->mfi_dev, "Cannot allocate ld\n");
923 return (ENOMEM);
924 }
925
926 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
927 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
928 return (EINVAL);
929 }
930
931 ld->ld_id = id;
932 ld->ld_disk = child;
933 ld->ld_secsize = secsize;
934 ld->ld_sectors = sectors;
935
936 device_set_ivars(child, ld);
937 device_set_desc(child, "MFI Logical Disk");
938 mtx_unlock(&sc->mfi_io_lock);
939 mtx_lock(&Giant);
940 bus_generic_attach(sc->mfi_dev);
941 mtx_unlock(&Giant);
942 mtx_lock(&sc->mfi_io_lock);
943
944 return (0);
945 }
946
947 static struct mfi_command *
948 mfi_bio_command(struct mfi_softc *sc)
949 {
950 struct mfi_io_frame *io;
951 struct mfi_command *cm;
952 struct bio *bio;
953 int flags, blkcount;;
954
955 if ((cm = mfi_dequeue_free(sc)) == NULL)
956 return (NULL);
957
958 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
959 mfi_release_command(cm);
960 return (NULL);
961 }
962
963 io = &cm->cm_frame->io;
964 switch (bio->bio_cmd & 0x03) {
965 case BIO_READ:
966 io->header.cmd = MFI_CMD_LD_READ;
967 flags = MFI_CMD_DATAIN;
968 break;
969 case BIO_WRITE:
970 io->header.cmd = MFI_CMD_LD_WRITE;
971 flags = MFI_CMD_DATAOUT;
972 break;
973 default:
974 panic("Invalid bio command");
975 }
976
977 /* Cheat with the sector length to avoid a non-constant division */
978 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
979 io->header.target_id = (uintptr_t)bio->bio_driver1;
980 io->header.timeout = 0;
981 io->header.flags = 0;
982 io->header.sense_len = MFI_SENSE_LEN;
983 io->header.data_len = blkcount;
984 io->sense_addr_lo = cm->cm_sense_busaddr;
985 io->sense_addr_hi = 0;
986 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
987 io->lba_lo = bio->bio_pblkno & 0xffffffff;
988 cm->cm_complete = mfi_bio_complete;
989 cm->cm_private = bio;
990 cm->cm_data = bio->bio_data;
991 cm->cm_len = bio->bio_bcount;
992 cm->cm_sg = &io->sgl;
993 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
994 cm->cm_flags = flags;
995
996 return (cm);
997 }
998
/*
 * Completion handler for block I/O commands: translate a firmware or
 * SCSI error into BIO_ERROR/EIO on the bio, release the command, and
 * hand the bio back to the disk layer.
 */
static void
mfi_bio_complete(struct mfi_command *cm)
{
	struct bio *bio;
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;

	bio = cm->cm_private;
	hdr = &cm->cm_frame->header;
	sc = cm->cm_sc;

	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = EIO;
		device_printf(sc->mfi_dev, "I/O error, status= %d "
		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
		mfi_print_sense(cm->cm_sc, cm->cm_sense);
	}

	mfi_release_command(cm);
	mfi_disk_complete(bio);
}
1021
1022 void
1023 mfi_startio(struct mfi_softc *sc)
1024 {
1025 struct mfi_command *cm;
1026
1027 for (;;) {
1028 /* Don't bother if we're short on resources */
1029 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1030 break;
1031
1032 /* Try a command that has already been prepared */
1033 cm = mfi_dequeue_ready(sc);
1034
1035 /* Nope, so look for work on the bioq */
1036 if (cm == NULL)
1037 cm = mfi_bio_command(sc);
1038
1039 /* No work available, so exit */
1040 if (cm == NULL)
1041 break;
1042
1043 /* Send the command to the controller */
1044 if (mfi_mapcmd(sc, cm) != 0) {
1045 mfi_requeue_ready(cm);
1046 break;
1047 }
1048 }
1049 }
1050
/*
 * Map a command's data buffer (if any) and dispatch it.  For commands
 * with data, bus_dmamap_load() invokes mfi_data_cb(), which fills in the
 * SG list and (for non-polled commands) sends the frame.  A deferred
 * load (EINPROGRESS) freezes the queue and reports success -- the
 * callback will deliver the command later.  Dataless commands are queued
 * busy and sent directly.
 */
static int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	if (cm->cm_data != NULL) {
		/* Polled commands can't tolerate a deferred callback */
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		mfi_enqueue_busy(cm);
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}
1071
/*
 * bus_dma callback: fill in the frame's scatter/gather list from the
 * segment array, set the direction flags, sync the buffer for DMA, and
 * (for non-polled commands) queue and send the frame.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, dir;

	/*
	 * NOTE(review): on a load error the command is neither completed
	 * nor requeued here -- verify callers can recover from this.
	 */
	if (error)
		return;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/* Use 32-bit SG entries unless the controller does 64-bit SGLs */
	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg32[i].addr = segs[i].ds_addr;
			sgl->sg32[i].len = segs[i].ds_len;
		}
	} else {
		for (i = 0; i < nsegs; i++) {
			sgl->sg64[i].addr = segs[i].ds_addr;
			sgl->sg64[i].len = segs[i].ds_len;
		}
		hdr->flags |= MFI_FRAME_SGL64;
	}
	hdr->sg_count = nsegs;

	/* Translate data direction into DMA sync ops and frame flags */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	/* The caller will take care of delivering polled commands */
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		mfi_enqueue_busy(cm);
		mfi_send_frame(sc, cm);
	}

	return;
}
1132
/*
 * Post a command to the controller by writing its shifted frame bus
 * address, OR'ed with the extra-frame count, to the inbound queue port.
 * Always returns 0 -- the hardware gives no immediate failure indication
 * at this point.
 */
1133 static int
1134 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1135 {
1136 
1137 	/*
1138 	 * The bus address of the command is aligned on a 64 byte boundary,
1139 	 * leaving the least 6 bits as zero. For whatever reason, the
1140 	 * hardware wants the address shifted right by three, leaving just
1141 	 * 3 zero bits. These three bits are then used to indicate how many
1142 	 * 64 byte frames beyond the first one are used in the command. The
1143 	 * extra frames are typically filled with S/G elements. The extra
1144 	 * frames must also be contiguous. Thus, a compound frame can be at
1145 	 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1146 	 * 39 64-bit S/G elements for block I/O commands. This means that
1147 	 * I/O transfers of 256k and higher simply are not possible, which
1148 	 * is quite odd for such a modern adapter.
1149 	 */
1150 	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1151 	    cm->cm_extra_frames);
1152 	return (0);
1153 }
1154
/*
 * Common completion path for a finished command: tear down the DMA
 * mapping (if one was set up by mfi_data_cb()), invoke the command's
 * completion callback, then unfreeze the queue and restart submission.
 */
1155 static void
1156 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1157 {
1158 	int dir;
1159 
1160 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
		/* Sync with the POST op matching the PRE op used at map time. */
1161 		dir = 0;
1162 		if (cm->cm_flags & MFI_CMD_DATAIN)
1163 			dir |= BUS_DMASYNC_POSTREAD;
1164 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1165 			dir |= BUS_DMASYNC_POSTWRITE;
1166 
1167 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1168 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1169 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1170 	}
1171 
1172 	if (cm->cm_complete != NULL)
1173 		cm->cm_complete(cm);
1174 
	/* Resources were released; unfreeze the queue and try more work. */
1175 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1176 	mfi_startio(sc);
1177 }
1178
/*
 * Write `len' bytes at `virt' to logical disk `id' starting at sector
 * `lba', using fully polled I/O (presumably for the kernel dump path,
 * where interrupts cannot be relied on -- confirm against callers).
 * Returns 0 on success, EBUSY if no command is free, or an errno from
 * the mapping/polling steps.
 */
1179 int
1180 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1181 {
1182 	struct mfi_command *cm;
1183 	struct mfi_io_frame *io;
1184 	int error;
1185 
1186 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1187 		return (EBUSY);
1188 
1189 	io = &cm->cm_frame->io;
1190 	io->header.cmd = MFI_CMD_LD_WRITE;
1191 	io->header.target_id = id;
1192 	io->header.timeout = 0;
1193 	io->header.flags = 0;
1194 	io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is expressed in sectors, rounded up. */
1195 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1196 	io->sense_addr_lo = cm->cm_sense_busaddr;
1197 	io->sense_addr_hi = 0;
	/* Split the 64-bit LBA into the frame's hi/lo halves. */
1198 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1199 	io->lba_lo = lba & 0xffffffff;
1200 	cm->cm_data = virt;
1201 	cm->cm_len = len;
1202 	cm->cm_sg = &io->sgl;
1203 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	/* Polled + DATAOUT: mfi_mapcmd() uses BUS_DMA_NOWAIT, never defers. */
1204 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1205 
1206 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1207 		mfi_release_command(cm);
1208 		return (error);
1209 	}
1210 
	/* Spin for completion, then tear down the DMA mapping by hand. */
1211 	error = mfi_polled_command(sc, cm);
1212 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1213 	    BUS_DMASYNC_POSTWRITE);
1214 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1215 	mfi_release_command(cm);
1216 
1217 	return (error);
1218 }
1219
1220 static int
1221 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1222 {
1223 struct mfi_softc *sc;
1224
1225 sc = dev->si_drv1;
1226 sc->mfi_flags |= MFI_FLAGS_OPEN;
1227
1228 return (0);
1229 }
1230
1231 static int
1232 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1233 {
1234 struct mfi_softc *sc;
1235
1236 sc = dev->si_drv1;
1237 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1238
1239 return (0);
1240 }
1241
/*
 * Control-device ioctl handler.  Only MFIIO_STATS is supported: it
 * copies out the statistics structure for one of the driver's command
 * queues (free, bio, ready, or busy).  Returns ENOENT for anything else.
 */
1242 static int
1243 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1244 {
1245 	struct mfi_softc *sc;
1246 	union mfi_statrequest *ms;
1247 	int error;
1248 
1249 	sc = dev->si_drv1;
1250 	error = 0;
1251 
1252 	switch (cmd) {
1253 	case MFIIO_STATS:
1254 		ms = (union mfi_statrequest *)arg;
		/* ms_item doubles as the index into the qstat array. */
1255 		switch (ms->ms_item) {
1256 		case MFIQ_FREE:
1257 		case MFIQ_BIO:
1258 		case MFIQ_READY:
1259 		case MFIQ_BUSY:
1260 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1261 			    sizeof(struct mfi_qstat));
1262 			break;
1263 		default:
1264 			error = ENOENT;
1265 			break;
1266 		}
1267 		break;
1268 	default:
1269 		error = ENOENT;
1270 		break;
1271 	}
1272 
1273 	return (error);
1274 }
Cache object: e550935a60597372257749e835aa8865
|