FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/mfi.c
1 /* $NetBSD: mfi.c,v 1.19.4.4 2010/03/28 15:03:22 snj Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.19.4.4 2010/03/28 15:03:22 snj Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
#ifdef MFI_DEBUG
/*
 * Debug category mask consumed by DNPRINTF(); uncomment entries below
 * to enable additional categories at compile time.
 */
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
		    | MFI_D_IOCTL
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
		;
#endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt_internal(struct mfi_softc *,
89 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
90 static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
91 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 static int mfi_ioctl(device_t, u_long, void *);
96 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int mfi_ioctl_alarm(struct mfi_softc *,
100 struct bioc_alarm *);
101 static int mfi_ioctl_blink(struct mfi_softc *sc,
102 struct bioc_blink *);
103 static int mfi_ioctl_setstate(struct mfi_softc *,
104 struct bioc_setstate *);
105 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int mfi_create_sensors(struct mfi_softc *);
107 static void mfi_sensor_refresh(struct sysmon_envsys *,
108 envsys_data_t *);
109 #endif /* NBIO > 0 */
110
111 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
112 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
113 static int mfi_xscale_intr(struct mfi_softc *sc);
114 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
115
116 static const struct mfi_iop_ops mfi_iop_xscale = {
117 mfi_xscale_fw_state,
118 mfi_xscale_intr_ena,
119 mfi_xscale_intr,
120 mfi_xscale_post
121 };
122
123 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
124 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
125 static int mfi_ppc_intr(struct mfi_softc *sc);
126 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
127
128 static const struct mfi_iop_ops mfi_iop_ppc = {
129 mfi_ppc_fw_state,
130 mfi_ppc_intr_ena,
131 mfi_ppc_intr,
132 mfi_ppc_post
133 };
134
135 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
136 void mfi_gen2_intr_ena(struct mfi_softc *sc);
137 int mfi_gen2_intr(struct mfi_softc *sc);
138 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
139
140 static const struct mfi_iop_ops mfi_iop_gen2 = {
141 mfi_gen2_fw_state,
142 mfi_gen2_intr_ena,
143 mfi_gen2_intr,
144 mfi_gen2_post
145 };
146
147 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
148 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
149 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
150 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
151
152 static struct mfi_ccb *
153 mfi_get_ccb(struct mfi_softc *sc)
154 {
155 struct mfi_ccb *ccb;
156 int s;
157
158 s = splbio();
159 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
160 if (ccb) {
161 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
162 ccb->ccb_state = MFI_CCB_READY;
163 }
164 splx(s);
165
166 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
167
168 return ccb;
169 }
170
171 static void
172 mfi_put_ccb(struct mfi_ccb *ccb)
173 {
174 struct mfi_softc *sc = ccb->ccb_sc;
175 int s;
176
177 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
178
179 s = splbio();
180 ccb->ccb_state = MFI_CCB_FREE;
181 ccb->ccb_xs = NULL;
182 ccb->ccb_flags = 0;
183 ccb->ccb_done = NULL;
184 ccb->ccb_direction = 0;
185 ccb->ccb_frame_size = 0;
186 ccb->ccb_extra_frames = 0;
187 ccb->ccb_sgl = NULL;
188 ccb->ccb_data = NULL;
189 ccb->ccb_len = 0;
190 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
191 splx(s);
192 }
193
/*
 * Allocate the array of command control blocks and bind each one to its
 * slice of the preallocated frame and sense DMA areas, plus a per-ccb
 * DMA map for data transfers.  Returns 0 on success; on failure every
 * already-created DMA map is destroyed and the ccb array freed, then 1
 * is returned.
 */
static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb *ccb;
	uint32_t i;
	int error;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	/* M_WAITOK: this allocation cannot fail, it sleeps instead */
	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		/* the context is how a completion is mapped back to this ccb */
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}
258
259 static uint32_t
260 mfi_read(struct mfi_softc *sc, bus_size_t r)
261 {
262 uint32_t rv;
263
264 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
265 BUS_SPACE_BARRIER_READ);
266 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
267
268 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), (u_long)r, rv);
269 return rv;
270 }
271
/*
 * Write a 32-bit value to the controller register at offset r, with a
 * write barrier issued after the access.
 */
static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
281
/*
 * Allocate and map a single contiguous, zeroed DMA-able memory region of
 * the given size (one segment, page-aligned) and load it into a DMA map.
 * Returns the wrapper descriptor, or NULL on failure; the goto chain
 * unwinds exactly the steps that already succeeded.
 */
static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	/* nsegments = 1: callers rely on a physically contiguous region */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}
330
/*
 * Release a region obtained from mfi_allocmem(), undoing its steps in
 * reverse order: unload, unmap, free the segment, destroy the map, then
 * free the descriptor itself.
 */
static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}
342
/*
 * Drive the firmware state machine toward MFI_STATE_READY, poking the
 * inbound doorbell where a state requires it and busy-waiting (in
 * 100 ms steps) up to a per-state limit for each transition.  Returns
 * 0 once ready, 1 on fault, unknown state, or timeout.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t		fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* acknowledge the handshake to let firmware proceed */
			mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask firmware to go (back) to the ready state */
			mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
		/* poll every 100 ms, up to max_wait seconds, for a change */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
	}

	return 0;
}
401
402 static int
403 mfi_initialize_firmware(struct mfi_softc *sc)
404 {
405 struct mfi_ccb *ccb;
406 struct mfi_init_frame *init;
407 struct mfi_init_qinfo *qinfo;
408
409 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
410
411 if ((ccb = mfi_get_ccb(sc)) == NULL)
412 return 1;
413
414 init = &ccb->ccb_frame->mfr_init;
415 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
416
417 memset(qinfo, 0, sizeof *qinfo);
418 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
419 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
420 offsetof(struct mfi_prod_cons, mpc_reply_q));
421 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
422 offsetof(struct mfi_prod_cons, mpc_producer));
423 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
424 offsetof(struct mfi_prod_cons, mpc_consumer));
425
426 init->mif_header.mfh_cmd = MFI_CMD_INIT;
427 init->mif_header.mfh_data_len = sizeof *qinfo;
428 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
429
430 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
431 DEVNAME(sc),
432 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
433 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
434
435 if (mfi_poll(ccb)) {
436 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
437 return 1;
438 }
439
440 mfi_put_ccb(ccb);
441
442 return 0;
443 }
444
/*
 * Fetch the controller information block (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info via a polled management command.  Returns 0 on success,
 * 1 on failure.  Under MFI_DEBUG the entire info structure is dumped
 * to the console.
 */
static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL))
		return 1;

#ifdef MFI_DEBUG
	/* everything below is a verbatim dump of sc_info for debugging */

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	/* NOTE(review): "%.0lx" (precision 0) looks odd — verify intent */
	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	/* NOTE(review): "%.x" likewise looks like a typo for "%#x" — verify */
	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}
603
604 static void
605 mfiminphys(struct buf *bp)
606 {
607 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
608
609 /* XXX currently using MFI_MAXFER = MAXPHYS */
610 if (bp->b_bcount > MFI_MAXFER)
611 bp->b_bcount = MFI_MAXFER;
612 minphys(bp);
613 }
614
615 int
616 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
617 {
618 struct scsipi_adapter *adapt = &sc->sc_adapt;
619 struct scsipi_channel *chan = &sc->sc_chan;
620 uint32_t status, frames;
621 int i;
622
623 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
624
625 switch (iop) {
626 case MFI_IOP_XSCALE:
627 sc->sc_iop = &mfi_iop_xscale;
628 break;
629 case MFI_IOP_PPC:
630 sc->sc_iop = &mfi_iop_ppc;
631 break;
632 case MFI_IOP_GEN2:
633 sc->sc_iop = &mfi_iop_gen2;
634 break;
635 default:
636 panic("%s: unknown iop %d", DEVNAME(sc), iop);
637 }
638
639 if (mfi_transition_firmware(sc))
640 return 1;
641
642 TAILQ_INIT(&sc->sc_ccb_freeq);
643
644 status = mfi_fw_state(sc);
645 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
646 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
647 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
648 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
649
650 /* consumer/producer and reply queue memory */
651 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
652 sizeof(struct mfi_prod_cons));
653 if (sc->sc_pcq == NULL) {
654 aprint_error("%s: unable to allocate reply queue memory\n",
655 DEVNAME(sc));
656 goto nopcq;
657 }
658 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
659 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
660 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
661
662 /* frame memory */
663 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
664 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
665 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
666 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
667 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
668 if (sc->sc_frames == NULL) {
669 aprint_error("%s: unable to allocate frame memory\n",
670 DEVNAME(sc));
671 goto noframe;
672 }
673 /* XXX hack, fix this */
674 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
675 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
676 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
677 goto noframe;
678 }
679
680 /* sense memory */
681 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
682 if (sc->sc_sense == NULL) {
683 aprint_error("%s: unable to allocate sense memory\n",
684 DEVNAME(sc));
685 goto nosense;
686 }
687
688 /* now that we have all memory bits go initialize ccbs */
689 if (mfi_init_ccb(sc)) {
690 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
691 goto noinit;
692 }
693
694 /* kickstart firmware with all addresses and pointers */
695 if (mfi_initialize_firmware(sc)) {
696 aprint_error("%s: could not initialize firmware\n",
697 DEVNAME(sc));
698 goto noinit;
699 }
700
701 if (mfi_get_info(sc)) {
702 aprint_error("%s: could not retrieve controller information\n",
703 DEVNAME(sc));
704 goto noinit;
705 }
706
707 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
708 DEVNAME(sc),
709 sc->sc_info.mci_lds_present,
710 sc->sc_info.mci_package_version,
711 sc->sc_info.mci_memory_size);
712
713 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
714 sc->sc_max_ld = sc->sc_ld_cnt;
715 for (i = 0; i < sc->sc_ld_cnt; i++)
716 sc->sc_ld[i].ld_present = 1;
717
718 memset(adapt, 0, sizeof(*adapt));
719 adapt->adapt_dev = sc->sc_dev;
720 adapt->adapt_nchannels = 1;
721 if (sc->sc_ld_cnt)
722 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
723 else
724 adapt->adapt_openings = sc->sc_max_cmds;
725 adapt->adapt_max_periph = adapt->adapt_openings;
726 adapt->adapt_request = mfi_scsipi_request;
727 adapt->adapt_minphys = mfiminphys;
728
729 memset(chan, 0, sizeof(*chan));
730 chan->chan_adapter = adapt;
731 chan->chan_bustype = &scsi_bustype;
732 chan->chan_channel = 0;
733 chan->chan_flags = 0;
734 chan->chan_nluns = 8;
735 chan->chan_ntargets = MFI_MAX_LD;
736 chan->chan_id = MFI_MAX_LD;
737
738 (void)config_found(sc->sc_dev, &sc->sc_chan, scsiprint);
739
740 /* enable interrupts */
741 mfi_intr_enable(sc);
742
743 #if NBIO > 0
744 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
745 panic("%s: controller registration failed", DEVNAME(sc));
746 if (mfi_create_sensors(sc) != 0)
747 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
748 #endif /* NBIO > 0 */
749
750 return 0;
751 noinit:
752 mfi_freemem(sc, sc->sc_sense);
753 nosense:
754 mfi_freemem(sc, sc->sc_frames);
755 noframe:
756 mfi_freemem(sc, sc->sc_pcq);
757 nopcq:
758 return 1;
759 }
760
/*
 * Execute a command synchronously by busy-waiting on its frame status
 * byte, bypassing the reply queue (MFI_FRAME_DONT_POST_IN_REPLY_QUEUE).
 * Also syncs and unloads the data DMA map if the command carried data.
 * Returns 0 on completion, 1 on timeout (~5 seconds).
 */
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	int			to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	/* 0xff is the "not yet completed" sentinel the loop below watches */
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_post(sc, ccb);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

	/* re-sync each iteration so we observe the device's status update */
	while (hdr->mfh_cmd_status == 0xff) {
		delay(1000);
		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
			break;
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status == 0xff) {
		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}
811
/*
 * Interrupt handler: walk the reply queue from the consumer index to the
 * firmware's producer index, dispatching each completed command's done
 * callback by its context (ccb array index).  Returns non-zero if the
 * interrupt was ours.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq;
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		/* consume the slot and poison it so stale entries are caught */
		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		/* ring has sc_max_cmds + 1 slots, hence the wrap point */
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* publish the new consumer index back to the firmware */
	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}
870
/*
 * Build a fast-path LD READ/WRITE frame for a block I/O transfer and
 * attach the data SGL.  blockno/blockcnt come pre-decoded from the CDB.
 * Returns 0 on success, 1 on error (no data buffer or SGL setup failed);
 * the caller is responsible for releasing the ccb on failure.
 */
static int
mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame   *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	/* data_len is in blocks for the IO frame, not bytes */
	io->mif_header.mfh_data_len= blockcnt;
	/* only 32-bit LBAs are issued here (lba_hi forced to 0) */
	io->mif_lba_hi = 0;
	io->mif_lba_lo = blockno;
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}
916
/*
 * Completion handler for scsipi commands: sync/unload the data DMA map,
 * translate the firmware status into a scsipi result (copying sense data
 * when the SCSI status is non-zero), then release the ccb and finish the
 * xfer via scsipi_done().
 */
static void
mfi_scsi_xs_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), hdr->mfh_cmd_status);

		/* non-zero SCSI status: pull sense data out of DMA memory */
		if (hdr->mfh_scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), hdr->mfh_scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}
965
/*
 * Build a pass-through (LD SCSI IO) frame that hands the raw CDB to the
 * firmware, used for every opcode the driver does not special-case.
 * Attaches a data SGL only when the xfer carries data.  Returns 0 on
 * success, 1 if SGL setup failed; the caller releases the ccb on failure.
 */
static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame	*pf;
	struct scsipi_periph *periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	/* CDB field is a fixed 16 bytes; zero-fill past the actual length */
	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}
1013
/*
 * scsipi adapter entry point.  Dispatches an ADAPTER_REQ_RUN_XFER to the
 * controller; GROW_RESOURCES and SET_XFER_MODE are not supported.  READ/WRITE
 * 6 and 10 go down the fast I/O path, SYNCHRONIZE CACHE is translated to a
 * firmware cache flush DCMD, and everything else is passed through to the
 * firmware as an LD SCSI command.  Runs at splbio() for the whole submission.
 */
static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
        struct scsipi_periph *periph;
        struct scsipi_xfer *xs;
        struct scsipi_adapter *adapt = chan->chan_adapter;
        struct mfi_softc *sc = device_private(adapt->adapt_dev);
        struct mfi_ccb *ccb;
        struct scsi_rw_6 *rw;
        struct scsipi_rw_10 *rwb;
        uint32_t blockno, blockcnt;
        uint8_t target;
        uint8_t mbox[MFI_MBOX_SIZE];
        int s;

        switch (req) {
        case ADAPTER_REQ_GROW_RESOURCES:
                /* Not supported. */
                return;
        case ADAPTER_REQ_SET_XFER_MODE:
                /* Not supported. */
                return;
        case ADAPTER_REQ_RUN_XFER:
                break;
        }

        xs = arg;

        DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
            DEVNAME(sc), req, xs->cmd->opcode);

        periph = xs->xs_periph;
        target = periph->periph_target;

        s = splbio();
        /* only single-LUN logical drives within the probed range exist */
        if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
            periph->periph_lun != 0) {
                DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
                    DEVNAME(sc), target);
                xs->error = XS_SELTIMEOUT;
                scsipi_done(xs);
                splx(s);
                return;
        }

        if ((ccb = mfi_get_ccb(sc)) == NULL) {
                DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
                xs->error = XS_RESOURCE_SHORTAGE;
                scsipi_done(xs);
                splx(s);
                return;
        }

        switch (xs->cmd->opcode) {
        /* IO path */
        case READ_10:
        case WRITE_10:
                rwb = (struct scsipi_rw_10 *)xs->cmd;
                blockno = _4btol(rwb->addr);
                blockcnt = _2btol(rwb->length);
                if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
                        mfi_put_ccb(ccb);
                        goto stuffup;
                }
                break;

        case SCSI_READ_6_COMMAND:
        case SCSI_WRITE_6_COMMAND:
                rw = (struct scsi_rw_6 *)xs->cmd;
                /* 6-byte CDB: 21-bit LBA, count 0 means 256 blocks */
                blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
                blockcnt = rw->length ? rw->length : 0x100;
                if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
                        mfi_put_ccb(ccb);
                        goto stuffup;
                }
                break;

        case SCSI_SYNCHRONIZE_CACHE_10:
                /* translated to a controller-wide cache flush DCMD */
                mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
                if (mfi_mgmt(ccb, xs,
                    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
                        mfi_put_ccb(ccb);
                        goto stuffup;
                }
                break;

        /* hand it of to the firmware and let it deal with it */
        case SCSI_TEST_UNIT_READY:
                /* save off sd? after autoconf */
                /*
                 * NOTE(review): this records the controller's xname, not the
                 * attached sd(4) device's name, into ld_dev -- matches the
                 * "XXX bogus" marker below; verify against bioctl output.
                 */
                if (!cold) /* XXX bogus */
                        strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
                            sizeof(sc->sc_ld[target].ld_dev));
                /* FALLTHROUGH */

        default:
                if (mfi_scsi_ld(ccb, xs)) {
                        mfi_put_ccb(ccb);
                        goto stuffup;
                }
                break;
        }

        DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

        if (xs->xs_control & XS_CTL_POLL) {
                if (mfi_poll(ccb)) {
                        /* XXX check for sense in ccb->ccb_sense? */
                        printf("%s: mfi_scsipi_request poll failed\n",
                            DEVNAME(sc));
                        /* synthesize ILLEGAL REQUEST sense for the caller */
                        bzero(&xs->sense, sizeof(xs->sense));
                        xs->sense.scsi_sense.response_code =
                            SSD_RCODE_VALID | SSD_RCODE_CURRENT;
                        xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
                        xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
                        xs->error = XS_SENSE;
                        xs->status = SCSI_CHECK;
                } else {
                        DNPRINTF(MFI_D_DMA,
                            "%s: mfi_scsipi_request poll complete %d\n",
                            DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
                        xs->error = XS_NOERROR;
                        xs->status = SCSI_OK;
                        xs->resid = 0;
                }
                mfi_put_ccb(ccb);
                scsipi_done(xs);
                splx(s);
                return;
        }

        /* async path: completion happens via ccb->ccb_done from the ISR */
        mfi_post(sc, ccb);

        DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
            ccb->ccb_dmamap->dm_nsegs);

        splx(s);
        return;

stuffup:
        xs->error = XS_DRIVER_STUFFUP;
        scsipi_done(xs);
        splx(s);
}
1158
/*
 * Load ccb->ccb_data into the ccb's DMA map and build the 32-bit
 * scatter/gather list in the command frame pointed to by ccb->ccb_sgl.
 * Also sets the frame's direction flag, performs the PRE DMA sync, and
 * accounts for the extra frames consumed by the SG entries.
 * Returns 0 on success, 1 on failure (no data or dmamap load error).
 */
static int
mfi_create_sgl(struct mfi_ccb *ccb, int flags)
{
        struct mfi_softc *sc = ccb->ccb_sc;
        struct mfi_frame_header *hdr;
        bus_dma_segment_t *sgd;
        union mfi_sgl *sgl;
        int error, i;

        DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
            (u_long)ccb->ccb_data);

        if (!ccb->ccb_data)
                return 1;

        error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
            ccb->ccb_data, ccb->ccb_len, NULL, flags);
        if (error) {
                if (error == EFBIG)
                        printf("more than %d dma segs\n",
                            sc->sc_max_sgl);
                else
                        printf("error %d loading dma map\n", error);
                return 1;
        }

        /* copy each DMA segment into the frame's SG list (little-endian) */
        hdr = &ccb->ccb_frame->mfr_header;
        sgl = ccb->ccb_sgl;
        sgd = ccb->ccb_dmamap->dm_segs;
        for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
                sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
                sgl->sg32[i].len = htole32(sgd[i].ds_len);
                DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
                    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
        }

        /* sync the buffer for the direction the hardware will use */
        if (ccb->ccb_direction == MFI_DATA_IN) {
                hdr->mfh_flags |= MFI_FRAME_DIR_READ;
                bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
                    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
        } else {
                hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
                bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
                    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
        }

        hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
        /* for 64 bit io make the sizeof a variable to hold whatever sg size */
        ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
            ccb->ccb_dmamap->dm_nsegs;
        /* how many additional MFI_FRAME_SIZE frames this command spills into */
        ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

        DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
            " dm_nsegs: %d extra_frames: %d\n",
            DEVNAME(sc),
            hdr->mfh_sg_count,
            ccb->ccb_frame_size,
            sc->sc_frames_size,
            ccb->ccb_dmamap->dm_nsegs,
            ccb->ccb_extra_frames);

        return 0;
}
1222
1223 static int
1224 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1225 uint32_t len, void *buf, uint8_t *mbox)
1226 {
1227 struct mfi_ccb *ccb;
1228 int rv = 1;
1229
1230 if ((ccb = mfi_get_ccb(sc)) == NULL)
1231 return rv;
1232 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1233 if (rv)
1234 return rv;
1235
1236 if (cold) {
1237 if (mfi_poll(ccb))
1238 goto done;
1239 } else {
1240 mfi_post(sc, ccb);
1241
1242 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1243 DEVNAME(sc));
1244 while (ccb->ccb_state != MFI_CCB_DONE)
1245 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1246
1247 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1248 goto done;
1249 }
1250 rv = 0;
1251
1252 done:
1253 mfi_put_ccb(ccb);
1254 return rv;
1255 }
1256
/*
 * Build a DCMD (management) frame in the given ccb.
 * opc is the firmware opcode, dir/len/buf describe the optional data phase,
 * and mbox (if non-NULL) supplies MFI_MBOX_SIZE bytes of opcode-specific
 * parameters.  xs may be NULL for internal commands; completion goes
 * through mfi_mgmt_done().  Returns 0 on success, 1 if SG list creation
 * fails.  Does NOT post the command.
 */
static int
mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
{
        struct mfi_dcmd_frame *dcmd;

        DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);

        dcmd = &ccb->ccb_frame->mfr_dcmd;
        memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
        dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
        dcmd->mdf_header.mfh_timeout = 0;

        dcmd->mdf_opcode = opc;
        dcmd->mdf_header.mfh_data_len = 0;
        ccb->ccb_direction = dir;
        ccb->ccb_xs = xs;
        ccb->ccb_done = mfi_mgmt_done;

        ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

        /* handle special opcodes */
        if (mbox)
                memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

        /* data phase: record the buffer and build the frame's SG list */
        if (dir != MFI_DATA_NONE) {
                dcmd->mdf_header.mfh_data_len = len;
                ccb->ccb_data = buf;
                ccb->ccb_len = len;
                ccb->ccb_sgl = &dcmd->mdf_sgl;

                if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
                        return 1;
        }
        return 0;
}
1293
/*
 * Completion handler for DCMD frames built by mfi_mgmt().
 * Syncs and unloads the DMA map, latches any firmware error into the ccb
 * flags, marks the ccb done, and then either finishes the scsipi transfer
 * (external commands) or wakes the sleeper in mfi_mgmt_internal()
 * (internal commands, xs == NULL).
 */
static void
mfi_mgmt_done(struct mfi_ccb *ccb)
{
        struct scsipi_xfer *xs = ccb->ccb_xs;
        struct mfi_softc *sc = ccb->ccb_sc;
        struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;

        DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
            DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

        if (ccb->ccb_data != NULL) {
                DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
                    DEVNAME(sc));
                /* POST sync must happen before the CPU reads the buffer */
                bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
                    ccb->ccb_dmamap->dm_mapsize,
                    (ccb->ccb_direction & MFI_DATA_IN) ?
                    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

                bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
        }

        /* record the error before flipping the state the sleeper watches */
        if (hdr->mfh_cmd_status != MFI_STAT_OK)
                ccb->ccb_flags |= MFI_CCB_F_ERR;

        ccb->ccb_state = MFI_CCB_DONE;
        if (xs) {
                if (hdr->mfh_cmd_status != MFI_STAT_OK) {
                        xs->error = XS_DRIVER_STUFFUP;
                } else {
                        xs->error = XS_NOERROR;
                        xs->status = SCSI_OK;
                        xs->resid = 0;
                }
                mfi_put_ccb(ccb);
                scsipi_done(xs);
        } else
                wakeup(ccb);
}
1332
1333 #if NBIO > 0
1334 int
1335 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1336 {
1337 struct mfi_softc *sc = device_private(dev);
1338 int error = 0;
1339 int s;
1340
1341 KERNEL_LOCK(1, curlwp);
1342 s = splbio();
1343
1344 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1345
1346 switch (cmd) {
1347 case BIOCINQ:
1348 DNPRINTF(MFI_D_IOCTL, "inq\n");
1349 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1350 break;
1351
1352 case BIOCVOL:
1353 DNPRINTF(MFI_D_IOCTL, "vol\n");
1354 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1355 break;
1356
1357 case BIOCDISK:
1358 DNPRINTF(MFI_D_IOCTL, "disk\n");
1359 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1360 break;
1361
1362 case BIOCALARM:
1363 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1364 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1365 break;
1366
1367 case BIOCBLINK:
1368 DNPRINTF(MFI_D_IOCTL, "blink\n");
1369 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1370 break;
1371
1372 case BIOCSETSTATE:
1373 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1374 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1375 break;
1376
1377 default:
1378 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1379 error = EINVAL;
1380 }
1381 splx(s);
1382 KERNEL_UNLOCK_ONE(curlwp);
1383
1384 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1385 return error;
1386 }
1387
1388 static int
1389 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1390 {
1391 struct mfi_conf *cfg;
1392 int rv = EINVAL;
1393
1394 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1395
1396 if (mfi_get_info(sc)) {
1397 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1398 DEVNAME(sc));
1399 return EIO;
1400 }
1401
1402 /* get figures */
1403 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1404 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1405 sizeof *cfg, cfg, NULL))
1406 goto freeme;
1407
1408 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1409 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1410 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1411
1412 rv = 0;
1413 freeme:
1414 free(cfg, M_DEVBUF);
1415 return rv;
1416 }
1417
1418 static int
1419 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1420 {
1421 int i, per, rv = EINVAL;
1422 uint8_t mbox[MFI_MBOX_SIZE];
1423
1424 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1425 DEVNAME(sc), bv->bv_volid);
1426
1427 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1428 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1429 goto done;
1430
1431 i = bv->bv_volid;
1432 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1433 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1434 DEVNAME(sc), mbox[0]);
1435
1436 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1437 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1438 goto done;
1439
1440 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1441 /* go do hotspares */
1442 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1443 goto done;
1444 }
1445
1446 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1447
1448 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1449 case MFI_LD_OFFLINE:
1450 bv->bv_status = BIOC_SVOFFLINE;
1451 break;
1452
1453 case MFI_LD_PART_DEGRADED:
1454 case MFI_LD_DEGRADED:
1455 bv->bv_status = BIOC_SVDEGRADED;
1456 break;
1457
1458 case MFI_LD_ONLINE:
1459 bv->bv_status = BIOC_SVONLINE;
1460 break;
1461
1462 default:
1463 bv->bv_status = BIOC_SVINVALID;
1464 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1465 DEVNAME(sc),
1466 sc->sc_ld_list.mll_list[i].mll_state);
1467 }
1468
1469 /* additional status can modify MFI status */
1470 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1471 case MFI_LD_PROG_CC:
1472 case MFI_LD_PROG_BGI:
1473 bv->bv_status = BIOC_SVSCRUB;
1474 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1475 bv->bv_percent = (per * 100) / 0xffff;
1476 bv->bv_seconds =
1477 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1478 break;
1479
1480 case MFI_LD_PROG_FGI:
1481 case MFI_LD_PROG_RECONSTRUCT:
1482 /* nothing yet */
1483 break;
1484 }
1485
1486 /*
1487 * The RAID levels are determined per the SNIA DDF spec, this is only
1488 * a subset that is valid for the MFI contrller.
1489 */
1490 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1491 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1492 MFI_DDF_SRL_SPANNED)
1493 bv->bv_level *= 10;
1494
1495 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1496 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1497
1498 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1499
1500 rv = 0;
1501 done:
1502 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1503 DEVNAME(sc), rv);
1504 return rv;
1505 }
1506
1507 static int
1508 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1509 {
1510 struct mfi_conf *cfg;
1511 struct mfi_array *ar;
1512 struct mfi_ld_cfg *ld;
1513 struct mfi_pd_details *pd;
1514 struct scsipi_inquiry_data *inqbuf;
1515 char vend[8+16+4+1];
1516 int i, rv = EINVAL;
1517 int arr, vol, disk;
1518 uint32_t size;
1519 uint8_t mbox[MFI_MBOX_SIZE];
1520
1521 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1522 DEVNAME(sc), bd->bd_diskid);
1523
1524 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1525
1526 /* send single element command to retrieve size for full structure */
1527 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1528 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1529 sizeof *cfg, cfg, NULL))
1530 goto freeme;
1531
1532 size = cfg->mfc_size;
1533 free(cfg, M_DEVBUF);
1534
1535 /* memory for read config */
1536 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1537 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1538 size, cfg, NULL))
1539 goto freeme;
1540
1541 ar = cfg->mfc_array;
1542
1543 /* calculate offset to ld structure */
1544 ld = (struct mfi_ld_cfg *)(
1545 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1546 cfg->mfc_array_size * cfg->mfc_no_array);
1547
1548 vol = bd->bd_volid;
1549
1550 if (vol >= cfg->mfc_no_ld) {
1551 /* do hotspares */
1552 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1553 goto freeme;
1554 }
1555
1556 /* find corresponding array for ld */
1557 for (i = 0, arr = 0; i < vol; i++)
1558 arr += ld[i].mlc_parm.mpa_span_depth;
1559
1560 /* offset disk into pd list */
1561 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1562
1563 /* offset array index into the next spans */
1564 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1565
1566 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1567 switch (ar[arr].pd[disk].mar_pd_state){
1568 case MFI_PD_UNCONFIG_GOOD:
1569 bd->bd_status = BIOC_SDUNUSED;
1570 break;
1571
1572 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1573 bd->bd_status = BIOC_SDHOTSPARE;
1574 break;
1575
1576 case MFI_PD_OFFLINE:
1577 bd->bd_status = BIOC_SDOFFLINE;
1578 break;
1579
1580 case MFI_PD_FAILED:
1581 bd->bd_status = BIOC_SDFAILED;
1582 break;
1583
1584 case MFI_PD_REBUILD:
1585 bd->bd_status = BIOC_SDREBUILD;
1586 break;
1587
1588 case MFI_PD_ONLINE:
1589 bd->bd_status = BIOC_SDONLINE;
1590 break;
1591
1592 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1593 default:
1594 bd->bd_status = BIOC_SDINVALID;
1595 break;
1596
1597 }
1598
1599 /* get the remaining fields */
1600 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1601 memset(pd, 0, sizeof(*pd));
1602 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1603 sizeof *pd, pd, mbox))
1604 goto freeme;
1605
1606 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1607
1608 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1609 bd->bd_channel = pd->mpd_enc_idx;
1610
1611 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1612 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1613 vend[sizeof vend - 1] = '\0';
1614 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1615
1616 /* XXX find a way to retrieve serial nr from drive */
1617 /* XXX find a way to get bd_procdev */
1618
1619 rv = 0;
1620 freeme:
1621 free(pd, M_DEVBUF);
1622 free(cfg, M_DEVBUF);
1623
1624 return rv;
1625 }
1626
1627 static int
1628 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1629 {
1630 uint32_t opc, dir = MFI_DATA_NONE;
1631 int rv = 0;
1632 int8_t ret;
1633
1634 switch(ba->ba_opcode) {
1635 case BIOC_SADISABLE:
1636 opc = MR_DCMD_SPEAKER_DISABLE;
1637 break;
1638
1639 case BIOC_SAENABLE:
1640 opc = MR_DCMD_SPEAKER_ENABLE;
1641 break;
1642
1643 case BIOC_SASILENCE:
1644 opc = MR_DCMD_SPEAKER_SILENCE;
1645 break;
1646
1647 case BIOC_GASTATUS:
1648 opc = MR_DCMD_SPEAKER_GET;
1649 dir = MFI_DATA_IN;
1650 break;
1651
1652 case BIOC_SATEST:
1653 opc = MR_DCMD_SPEAKER_TEST;
1654 break;
1655
1656 default:
1657 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1658 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1659 return EINVAL;
1660 }
1661
1662 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1663 rv = EINVAL;
1664 else
1665 if (ba->ba_opcode == BIOC_GASTATUS)
1666 ba->ba_status = ret;
1667 else
1668 ba->ba_status = 0;
1669
1670 return rv;
1671 }
1672
1673 static int
1674 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1675 {
1676 int i, found, rv = EINVAL;
1677 uint8_t mbox[MFI_MBOX_SIZE];
1678 uint32_t cmd;
1679 struct mfi_pd_list *pd;
1680
1681 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1682 bb->bb_status);
1683
1684 /* channel 0 means not in an enclosure so can't be blinked */
1685 if (bb->bb_channel == 0)
1686 return EINVAL;
1687
1688 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1689
1690 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1691 MFI_PD_LIST_SIZE, pd, NULL))
1692 goto done;
1693
1694 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1695 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1696 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1697 found = 1;
1698 break;
1699 }
1700
1701 if (!found)
1702 goto done;
1703
1704 memset(mbox, 0, sizeof mbox);
1705
1706 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1707
1708 switch (bb->bb_status) {
1709 case BIOC_SBUNBLINK:
1710 cmd = MR_DCMD_PD_UNBLINK;
1711 break;
1712
1713 case BIOC_SBBLINK:
1714 cmd = MR_DCMD_PD_BLINK;
1715 break;
1716
1717 case BIOC_SBALARM:
1718 default:
1719 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1720 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1721 goto done;
1722 }
1723
1724
1725 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1726 goto done;
1727
1728 rv = 0;
1729 done:
1730 free(pd, M_DEVBUF);
1731 return rv;
1732 }
1733
1734 static int
1735 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1736 {
1737 struct mfi_pd_list *pd;
1738 int i, found, rv = EINVAL;
1739 uint8_t mbox[MFI_MBOX_SIZE];
1740 uint32_t cmd;
1741
1742 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1743 bs->bs_status);
1744
1745 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1746
1747 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1748 MFI_PD_LIST_SIZE, pd, NULL))
1749 goto done;
1750
1751 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1752 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1753 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1754 found = 1;
1755 break;
1756 }
1757
1758 if (!found)
1759 goto done;
1760
1761 memset(mbox, 0, sizeof mbox);
1762
1763 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1764
1765 switch (bs->bs_status) {
1766 case BIOC_SSONLINE:
1767 mbox[2] = MFI_PD_ONLINE;
1768 cmd = MD_DCMD_PD_SET_STATE;
1769 break;
1770
1771 case BIOC_SSOFFLINE:
1772 mbox[2] = MFI_PD_OFFLINE;
1773 cmd = MD_DCMD_PD_SET_STATE;
1774 break;
1775
1776 case BIOC_SSHOTSPARE:
1777 mbox[2] = MFI_PD_HOTSPARE;
1778 cmd = MD_DCMD_PD_SET_STATE;
1779 break;
1780 /*
1781 case BIOC_SSREBUILD:
1782 cmd = MD_DCMD_PD_REBUILD;
1783 break;
1784 */
1785 default:
1786 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1787 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1788 goto done;
1789 }
1790
1791
1792 if (mfi_mgmt_internal(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE,
1793 0, NULL, mbox))
1794 goto done;
1795
1796 rv = 0;
1797 done:
1798 free(pd, M_DEVBUF);
1799 return rv;
1800 }
1801
1802 static int
1803 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1804 {
1805 struct mfi_conf *cfg;
1806 struct mfi_hotspare *hs;
1807 struct mfi_pd_details *pd;
1808 struct bioc_disk *sdhs;
1809 struct bioc_vol *vdhs;
1810 struct scsipi_inquiry_data *inqbuf;
1811 char vend[8+16+4+1];
1812 int i, rv = EINVAL;
1813 uint32_t size;
1814 uint8_t mbox[MFI_MBOX_SIZE];
1815
1816 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1817
1818 if (!bio_hs)
1819 return EINVAL;
1820
1821 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1822
1823 /* send single element command to retrieve size for full structure */
1824 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1825 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1826 sizeof *cfg, cfg, NULL))
1827 goto freeme;
1828
1829 size = cfg->mfc_size;
1830 free(cfg, M_DEVBUF);
1831
1832 /* memory for read config */
1833 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1834 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1835 size, cfg, NULL))
1836 goto freeme;
1837
1838 /* calculate offset to hs structure */
1839 hs = (struct mfi_hotspare *)(
1840 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1841 cfg->mfc_array_size * cfg->mfc_no_array +
1842 cfg->mfc_ld_size * cfg->mfc_no_ld);
1843
1844 if (volid < cfg->mfc_no_ld)
1845 goto freeme; /* not a hotspare */
1846
1847 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
1848 goto freeme; /* not a hotspare */
1849
1850 /* offset into hotspare structure */
1851 i = volid - cfg->mfc_no_ld;
1852
1853 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1854 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1855 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1856
1857 /* get pd fields */
1858 memset(mbox, 0, sizeof mbox);
1859 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1860 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1861 sizeof *pd, pd, mbox)) {
1862 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1863 DEVNAME(sc));
1864 goto freeme;
1865 }
1866
1867 switch (type) {
1868 case MFI_MGMT_VD:
1869 vdhs = bio_hs;
1870 vdhs->bv_status = BIOC_SVONLINE;
1871 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
1872 vdhs->bv_level = -1; /* hotspare */
1873 vdhs->bv_nodisk = 1;
1874 break;
1875
1876 case MFI_MGMT_SD:
1877 sdhs = bio_hs;
1878 sdhs->bd_status = BIOC_SDHOTSPARE;
1879 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
1880 sdhs->bd_channel = pd->mpd_enc_idx;
1881 sdhs->bd_target = pd->mpd_enc_slot;
1882 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1883 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1884 vend[sizeof vend - 1] = '\0';
1885 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1886 break;
1887
1888 default:
1889 goto freeme;
1890 }
1891
1892 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1893 rv = 0;
1894 freeme:
1895 free(pd, M_DEVBUF);
1896 free(cfg, M_DEVBUF);
1897
1898 return rv;
1899 }
1900
1901 static int
1902 mfi_create_sensors(struct mfi_softc *sc)
1903 {
1904 int i;
1905 int nsensors = sc->sc_ld_cnt;
1906
1907 sc->sc_sme = sysmon_envsys_create();
1908 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1909 M_DEVBUF, M_NOWAIT | M_ZERO);
1910 if (sc->sc_sensor == NULL) {
1911 aprint_error("%s: can't allocate envsys_data_t\n",
1912 DEVNAME(sc));
1913 return ENOMEM;
1914 }
1915
1916 for (i = 0; i < nsensors; i++) {
1917 sc->sc_sensor[i].units = ENVSYS_DRIVE;
1918 sc->sc_sensor[i].monitor = true;
1919 /* Enable monitoring for drive state changes */
1920 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1921 /* logical drives */
1922 snprintf(sc->sc_sensor[i].desc,
1923 sizeof(sc->sc_sensor[i].desc), "%s:%d",
1924 DEVNAME(sc), i);
1925 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1926 &sc->sc_sensor[i]))
1927 goto out;
1928 }
1929
1930 sc->sc_sme->sme_name = DEVNAME(sc);
1931 sc->sc_sme->sme_cookie = sc;
1932 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
1933 if (sysmon_envsys_register(sc->sc_sme)) {
1934 aprint_error("%s: unable to register with sysmon\n",
1935 DEVNAME(sc));
1936 goto out;
1937 }
1938 return 0;
1939
1940 out:
1941 free(sc->sc_sensor, M_DEVBUF);
1942 sysmon_envsys_destroy(sc->sc_sme);
1943 return EINVAL;
1944 }
1945
1946 static void
1947 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1948 {
1949 struct mfi_softc *sc = sme->sme_cookie;
1950 struct bioc_vol bv;
1951 int s;
1952 int error;
1953
1954 if (edata->sensor >= sc->sc_ld_cnt)
1955 return;
1956
1957 bzero(&bv, sizeof(bv));
1958 bv.bv_volid = edata->sensor;
1959 KERNEL_LOCK(1, curlwp);
1960 s = splbio();
1961 error = mfi_ioctl_vol(sc, &bv);
1962 splx(s);
1963 KERNEL_UNLOCK_ONE(curlwp);
1964 if (error)
1965 return;
1966
1967 switch(bv.bv_status) {
1968 case BIOC_SVOFFLINE:
1969 edata->value_cur = ENVSYS_DRIVE_FAIL;
1970 edata->state = ENVSYS_SCRITICAL;
1971 break;
1972
1973 case BIOC_SVDEGRADED:
1974 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1975 edata->state = ENVSYS_SCRITICAL;
1976 break;
1977
1978 case BIOC_SVSCRUB:
1979 case BIOC_SVONLINE:
1980 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1981 edata->state = ENVSYS_SVALID;
1982 break;
1983
1984 case BIOC_SVINVALID:
1985 /* FALLTRHOUGH */
1986 default:
1987 edata->value_cur = 0; /* unknown */
1988 edata->state = ENVSYS_SINVALID;
1989 }
1990 }
1991
1992 #endif /* NBIO > 0 */
1993
1994 static uint32_t
1995 mfi_xscale_fw_state(struct mfi_softc *sc)
1996 {
1997 return mfi_read(sc, MFI_OMSG0);
1998 }
1999
/* Enable xscale interrupts by writing the enable value to the mask reg. */
static void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
        mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2005
2006 static int
2007 mfi_xscale_intr(struct mfi_softc *sc)
2008 {
2009 uint32_t status;
2010
2011 status = mfi_read(sc, MFI_OSTS);
2012 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2013 return 0;
2014
2015 /* write status back to acknowledge interrupt */
2016 mfi_write(sc, MFI_OSTS, status);
2017 return 1;
2018 }
2019
/*
 * Hand a command frame to xscale firmware: PRE-sync the frame and sense
 * DMA areas, then write the frame's bus address (shifted, with the extra
 * frame count in the low bits) to the inbound queue port.
 */
static void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
        bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
            ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
            sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
            ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
            MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

        /* address >> 3 leaves room to encode the extra frame count */
        mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
            ccb->ccb_extra_frames);
}
2033
2034 static uint32_t
2035 mfi_ppc_fw_state(struct mfi_softc *sc)
2036 {
2037 return mfi_read(sc, MFI_OSP);
2038 }
2039
/*
 * Enable ppc interrupts: clear any pending doorbell bits, then unmask.
 * NOTE(review): the ~0x80000004 mask value is taken as-is from the
 * original driver; the set bits it leaves masked are not named in
 * mfireg.h here -- verify against the chip documentation.
 */
static void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
        mfi_write(sc, MFI_ODC, 0xffffffff);
        mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2046
2047 static int
2048 mfi_ppc_intr(struct mfi_softc *sc)
2049 {
2050 uint32_t status;
2051
2052 status = mfi_read(sc, MFI_OSTS);
2053 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2054 return 0;
2055
2056 /* write status back to acknowledge interrupt */
2057 mfi_write(sc, MFI_ODC, status);
2058 return 1;
2059 }
2060
2061 static void
2062 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2063 {
2064 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2065 (ccb->ccb_extra_frames << 1));
2066 }
2067
2068 u_int32_t
2069 mfi_gen2_fw_state(struct mfi_softc *sc)
2070 {
2071 return (mfi_read(sc, MFI_OSP));
2072 }
2073
/* Enable gen2 interrupts: clear pending doorbell bits, then unmask the
 * gen2 valid-interrupt bit. */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
        mfi_write(sc, MFI_ODC, 0xffffffff);
        mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2080
2081 int
2082 mfi_gen2_intr(struct mfi_softc *sc)
2083 {
2084 u_int32_t status;
2085
2086 status = mfi_read(sc, MFI_OSTS);
2087 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2088 return (0);
2089
2090 /* write status back to acknowledge interrupt */
2091 mfi_write(sc, MFI_ODC, status);
2092
2093 return (1);
2094 }
2095
2096 void
2097 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2098 {
2099 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2100 (ccb->ccb_extra_frames << 1));
2101 }
Cache object: b00f6b35b41a1f8c92d52028a0ad8c0b
|