/* sys/dev/ic/mfi.c — listing extracted from the FreeBSD/Linux Kernel Cross Reference */
1 /* $OpenBSD: mfi.c,v 1.188 2022/04/16 19:19:59 naddy Exp $ */
2 /*
3 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "bio.h"
19
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/buf.h>
23 #include <sys/device.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/rwlock.h>
27 #include <sys/sensors.h>
28 #include <sys/dkio.h>
29 #include <sys/pool.h>
30
31 #include <machine/bus.h>
32
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsi_disk.h>
35 #include <scsi/scsiconf.h>
36
37 #include <dev/biovar.h>
38 #include <dev/ic/mfireg.h>
39 #include <dev/ic/mfivar.h>
40
#ifdef MFI_DEBUG
/*
 * Runtime debug mask: OR in the MFI_D_* categories below to enable the
 * corresponding DNPRINTF() output.
 */
uint32_t mfi_debug = 0
/* | MFI_D_CMD */
/* | MFI_D_INTR */
/* | MFI_D_MISC */
/* | MFI_D_DMA */
/* | MFI_D_IOCTL */
/* | MFI_D_RW */
/* | MFI_D_MEM */
/* | MFI_D_CCB */
    ;
#endif
53
/* Autoconf glue: device class for mfi(4). */
struct cfdriver mfi_cd = {
    NULL, "mfi", DV_DULL
};

/* SCSI path for logical disks (RAID volumes). */
void mfi_scsi_cmd(struct scsi_xfer *);
int mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
int mfi_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);

/* Pass-through SCSI path for physical disks ("system PDs"). */
void mfi_pd_scsi_cmd(struct scsi_xfer *);
int mfi_pd_scsi_probe(struct scsi_link *);

/* Adapter entry points for the logical-disk bus. */
const struct scsi_adapter mfi_switch = {
    mfi_scsi_cmd, NULL, NULL, NULL, mfi_scsi_ioctl
};

/* Adapter entry points for the physical-disk bus. */
const struct scsi_adapter mfi_pd_switch = {
    mfi_pd_scsi_cmd, NULL, mfi_pd_scsi_probe, NULL, mfi_scsi_ioctl
};

/* ccb (command control block) pool management. */
void * mfi_get_ccb(void *);
void mfi_put_ccb(void *, void *);
void mfi_scrub_ccb(struct mfi_ccb *);
int mfi_init_ccb(struct mfi_softc *);

/* DMA-safe memory helpers. */
struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
void mfi_freemem(struct mfi_softc *, struct mfi_mem *);

/* Firmware bring-up, register access and command execution. */
int mfi_transition_firmware(struct mfi_softc *);
int mfi_initialize_firmware(struct mfi_softc *);
int mfi_get_info(struct mfi_softc *);
uint32_t mfi_read(struct mfi_softc *, bus_size_t);
void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
void mfi_poll(struct mfi_softc *, struct mfi_ccb *);
void mfi_exec(struct mfi_softc *, struct mfi_ccb *);
void mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
int mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
u_int mfi_default_sgd_load(struct mfi_softc *, struct mfi_ccb *);
int mfi_syspd(struct mfi_softc *);

/* commands */
int mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
    struct scsi_xfer *);
int mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
    struct scsi_xfer *, uint64_t, uint32_t);
void mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
    void *, const union mfi_mbox *);
int mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
    uint32_t, uint32_t, void *, const union mfi_mbox *);
void mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);

#if NBIO > 0
/* bio(4) RAID management ioctl support. */
int mfi_ioctl(struct device *, u_long, caddr_t);
int mfi_bio_getitall(struct mfi_softc *);
int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
int mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *);
int mfi_bio_hs(struct mfi_softc *, int, int, void *);
#ifndef SMALL_KERNEL
/* Hardware sensors (volume status, battery backup unit). */
int mfi_create_sensors(struct mfi_softc *);
void mfi_refresh_sensors(void *);
int mfi_bbu(struct mfi_softc *);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

/* Command submission/completion and XScale IOP register accessors. */
void mfi_start(struct mfi_softc *, struct mfi_ccb *);
void mfi_done(struct mfi_softc *, struct mfi_ccb *);
u_int32_t mfi_xscale_fw_state(struct mfi_softc *);
void mfi_xscale_intr_ena(struct mfi_softc *);
int mfi_xscale_intr(struct mfi_softc *);
void mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
129
130 static const struct mfi_iop_ops mfi_iop_xscale = {
131 mfi_xscale_fw_state,
132 mfi_xscale_intr_ena,
133 mfi_xscale_intr,
134 mfi_xscale_post,
135 mfi_default_sgd_load,
136 0,
137 };
138
139 u_int32_t mfi_ppc_fw_state(struct mfi_softc *);
140 void mfi_ppc_intr_ena(struct mfi_softc *);
141 int mfi_ppc_intr(struct mfi_softc *);
142 void mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
143
144 static const struct mfi_iop_ops mfi_iop_ppc = {
145 mfi_ppc_fw_state,
146 mfi_ppc_intr_ena,
147 mfi_ppc_intr,
148 mfi_ppc_post,
149 mfi_default_sgd_load,
150 MFI_IDB,
151 0
152 };
153
154 u_int32_t mfi_gen2_fw_state(struct mfi_softc *);
155 void mfi_gen2_intr_ena(struct mfi_softc *);
156 int mfi_gen2_intr(struct mfi_softc *);
157 void mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
158
159 static const struct mfi_iop_ops mfi_iop_gen2 = {
160 mfi_gen2_fw_state,
161 mfi_gen2_intr_ena,
162 mfi_gen2_intr,
163 mfi_gen2_post,
164 mfi_default_sgd_load,
165 MFI_IDB,
166 0
167 };
168
169 u_int32_t mfi_skinny_fw_state(struct mfi_softc *);
170 void mfi_skinny_intr_ena(struct mfi_softc *);
171 int mfi_skinny_intr(struct mfi_softc *);
172 void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
173 u_int mfi_skinny_sgd_load(struct mfi_softc *, struct mfi_ccb *);
174
175 static const struct mfi_iop_ops mfi_iop_skinny = {
176 mfi_skinny_fw_state,
177 mfi_skinny_intr_ena,
178 mfi_skinny_intr,
179 mfi_skinny_post,
180 mfi_skinny_sgd_load,
181 MFI_SKINNY_IDB,
182 MFI_IOP_F_SYSPD
183 };
184
/* Convenience wrappers that indirect through the per-IOP ops vector. */
#define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
#define mfi_sgd_load(_s, _c) ((_s)->sc_iop->mio_sgd_load((_s), (_c)))
190
191 void *
192 mfi_get_ccb(void *cookie)
193 {
194 struct mfi_softc *sc = cookie;
195 struct mfi_ccb *ccb;
196
197 KERNEL_UNLOCK();
198
199 mtx_enter(&sc->sc_ccb_mtx);
200 ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
201 if (ccb != NULL) {
202 SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
203 ccb->ccb_state = MFI_CCB_READY;
204 }
205 mtx_leave(&sc->sc_ccb_mtx);
206
207 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
208 KERNEL_LOCK();
209
210 return (ccb);
211 }
212
/*
 * iopool "put" backend: return a ccb to the free list.  The kernel
 * lock is dropped around the insertion; sc_ccb_mtx alone protects
 * the free list (matching mfi_get_ccb()).
 */
void
mfi_put_ccb(void *cookie, void *io)
{
    struct mfi_softc *sc = cookie;
    struct mfi_ccb *ccb = io;

    DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

    KERNEL_UNLOCK();
    mtx_enter(&sc->sc_ccb_mtx);
    SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
    mtx_leave(&sc->sc_ccb_mtx);
    KERNEL_LOCK();
}
227
228 void
229 mfi_scrub_ccb(struct mfi_ccb *ccb)
230 {
231 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
232
233 hdr->mfh_cmd_status = 0x0;
234 hdr->mfh_flags = 0x0;
235 ccb->ccb_state = MFI_CCB_FREE;
236 ccb->ccb_cookie = NULL;
237 ccb->ccb_flags = 0;
238 ccb->ccb_done = NULL;
239 ccb->ccb_direction = 0;
240 ccb->ccb_frame_size = 0;
241 ccb->ccb_extra_frames = 0;
242 ccb->ccb_sgl = NULL;
243 ccb->ccb_data = NULL;
244 ccb->ccb_len = 0;
245 }
246
247 int
248 mfi_init_ccb(struct mfi_softc *sc)
249 {
250 struct mfi_ccb *ccb;
251 uint32_t i;
252 int error;
253
254 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
255
256 sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
257 M_DEVBUF, M_WAITOK|M_ZERO);
258
259 for (i = 0; i < sc->sc_max_cmds; i++) {
260 ccb = &sc->sc_ccb[i];
261
262 /* select i'th frame */
263 ccb->ccb_frame = (union mfi_frame *)
264 (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
265 ccb->ccb_pframe =
266 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
267 ccb->ccb_pframe_offset = sc->sc_frames_size * i;
268 ccb->ccb_frame->mfr_header.mfh_context = i;
269
270 /* select i'th sense */
271 ccb->ccb_sense = (struct mfi_sense *)
272 (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
273 ccb->ccb_psense =
274 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
275
276 /* create a dma map for transfer */
277 error = bus_dmamap_create(sc->sc_dmat,
278 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
279 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
280 if (error) {
281 printf("%s: cannot create ccb dmamap (%d)\n",
282 DEVNAME(sc), error);
283 goto destroy;
284 }
285
286 DNPRINTF(MFI_D_CCB,
287 "ccb(%d): %p frame: %p (%#lx) sense: %p (%#lx) map: %p\n",
288 ccb->ccb_frame->mfr_header.mfh_context, ccb,
289 ccb->ccb_frame, ccb->ccb_pframe,
290 ccb->ccb_sense, ccb->ccb_psense,
291 ccb->ccb_dmamap);
292
293 /* add ccb to queue */
294 mfi_put_ccb(sc, ccb);
295 }
296
297 return (0);
298 destroy:
299 /* free dma maps and ccb memory */
300 while ((ccb = mfi_get_ccb(sc)) != NULL)
301 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
302
303 free(sc->sc_ccb, M_DEVBUF, 0);
304
305 return (1);
306 }
307
308 uint32_t
309 mfi_read(struct mfi_softc *sc, bus_size_t r)
310 {
311 uint32_t rv;
312
313 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
314 BUS_SPACE_BARRIER_READ);
315 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
316
317 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), r, rv);
318 return (rv);
319 }
320
/*
 * Write a 32-bit controller register, followed by a write barrier so
 * the store is ordered before any subsequent register access.
 */
void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
    DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), r, v);

    bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
    bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
        BUS_SPACE_BARRIER_WRITE);
}
330
/*
 * Allocate `size' bytes of zeroed, DMA-safe memory in a single
 * segment and map it into kernel virtual space.  Returns a wrapper
 * describing the mapping, or NULL on failure.  Release with
 * mfi_freemem().
 */
struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
    struct mfi_mem *mm;
    int nsegs;

    DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %zu\n", DEVNAME(sc),
        size);

    mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (mm == NULL)
        return (NULL);

    mm->am_size = size;

    /* one segment only, so the device sees a contiguous region */
    if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
        BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
        goto amfree;

    if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
        &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
        goto destroy;

    if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
        BUS_DMA_NOWAIT) != 0)
        goto free;

    if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
        BUS_DMA_NOWAIT) != 0)
        goto unmap;

    DNPRINTF(MFI_D_MEM, " kva: %p dva: %lx map: %p\n",
        mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);

    return (mm);

/* unwind in reverse order of the steps above */
unmap:
    bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
    bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
    bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
    free(mm, M_DEVBUF, sizeof *mm);

    return (NULL);
}
378
/*
 * Tear down a mapping created by mfi_allocmem(), unwinding the
 * load/map/alloc/create steps in reverse order, then free the
 * wrapper itself.
 */
void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
    DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

    bus_dmamap_unload(sc->sc_dmat, mm->am_map);
    bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
    bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
    bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
    free(mm, M_DEVBUF, sizeof *mm);
}
390
/*
 * Drive the adapter firmware to MFI_STATE_READY.  Intermediate
 * states are acknowledged through the doorbell register where
 * required, then polled with a per-state timeout (max_wait seconds,
 * checked in 100ms steps).  Returns 0 once the firmware is ready,
 * 1 on fault, unknown state, or timeout.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
    int32_t fw_state, cur_state;
    u_int32_t idb = sc->sc_iop->mio_idb;
    int max_wait, i;

    fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

    DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
        fw_state);

    while (fw_state != MFI_STATE_READY) {
        DNPRINTF(MFI_D_MISC,
            "%s: waiting for firmware to become ready\n",
            DEVNAME(sc));
        cur_state = fw_state;
        switch (fw_state) {
        case MFI_STATE_FAULT:
            /* unrecoverable; give up immediately */
            printf("%s: firmware fault\n", DEVNAME(sc));
            return (1);
        case MFI_STATE_WAIT_HANDSHAKE:
            /* acknowledge the handshake via the doorbell */
            mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
            max_wait = 2;
            break;
        case MFI_STATE_OPERATIONAL:
            /* ask the firmware to go (back) to ready */
            mfi_write(sc, idb, MFI_INIT_READY);
            max_wait = 10;
            break;
        case MFI_STATE_UNDEFINED:
        case MFI_STATE_BB_INIT:
            max_wait = 2;
            break;
        case MFI_STATE_FW_INIT:
        case MFI_STATE_DEVICE_SCAN:
        case MFI_STATE_FLUSH_CACHE:
            /* longer-running init phases */
            max_wait = 20;
            break;
        default:
            printf("%s: unknown firmware state %d\n",
                DEVNAME(sc), fw_state);
            return (1);
        }
        /* poll for a state change, 100ms per iteration */
        for (i = 0; i < (max_wait * 10); i++) {
            fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
            if (fw_state == cur_state)
                DELAY(100000);
            else
                break;
        }
        if (fw_state == cur_state) {
            printf("%s: firmware stuck in state %#x\n",
                DEVNAME(sc), fw_state);
            return (1);
        }
    }

    return (0);
}
450
/*
 * Send the MFI_CMD_INIT frame handing the firmware the DMA addresses
 * of the reply queue and the producer/consumer indices in sc_pcq.
 * Executed by polling, before interrupts are enabled.
 * Returns 0 on success, 1 if the firmware reported an error.
 */
int
mfi_initialize_firmware(struct mfi_softc *sc)
{
    struct mfi_ccb *ccb;
    struct mfi_init_frame *init;
    struct mfi_init_qinfo *qinfo;
    int rv = 0;

    DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

    ccb = scsi_io_get(&sc->sc_iopool, 0);
    mfi_scrub_ccb(ccb);

    /* queue description goes in the frame following the init frame */
    init = &ccb->ccb_frame->mfr_init;
    qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

    memset(qinfo, 0, sizeof(*qinfo));
    /* one slot per command plus one — presumably so the reply ring
     * can never appear completely full; confirm against spec */
    qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);

    qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
        offsetof(struct mfi_prod_cons, mpc_reply_q));

    qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
        offsetof(struct mfi_prod_cons, mpc_producer));

    qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
        offsetof(struct mfi_prod_cons, mpc_consumer));

    init->mif_header.mfh_cmd = MFI_CMD_INIT;
    init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
    /* device address of the qinfo structure built above */
    init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);

    bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
        0, MFIMEM_LEN(sc->sc_pcq),
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    ccb->ccb_done = mfi_empty_done;
    mfi_poll(sc, ccb);
    if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
        rv = 1;

    /*
     * NOTE(review): the ccb was obtained with scsi_io_get() but is
     * released with mfi_put_ccb() directly.  The pool's put routine
     * is mfi_put_ccb (see scsi_iopool_init in mfi_attach), so the
     * effect matches, but scsi_io_put() would be more symmetric —
     * confirm.
     */
    mfi_put_ccb(sc, ccb);

    return (rv);
}
496
/*
 * No-op completion callback for commands whose status is inspected
 * directly by the submitter (e.g. mfi_initialize_firmware()).
 */
void
mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
    /* nop */
}
502
503 int
504 mfi_get_info(struct mfi_softc *sc)
505 {
506 #ifdef MFI_DEBUG
507 int i;
508 #endif
509 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
510
511 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
512 sizeof(sc->sc_info), &sc->sc_info, NULL))
513 return (1);
514
515 #ifdef MFI_DEBUG
516 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
517 printf("%s: active FW %s Version %s date %s time %s\n",
518 DEVNAME(sc),
519 sc->sc_info.mci_image_component[i].mic_name,
520 sc->sc_info.mci_image_component[i].mic_version,
521 sc->sc_info.mci_image_component[i].mic_build_date,
522 sc->sc_info.mci_image_component[i].mic_build_time);
523 }
524
525 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
526 printf("%s: pending FW %s Version %s date %s time %s\n",
527 DEVNAME(sc),
528 sc->sc_info.mci_pending_image_component[i].mic_name,
529 sc->sc_info.mci_pending_image_component[i].mic_version,
530 sc->sc_info.mci_pending_image_component[i].mic_build_date,
531 sc->sc_info.mci_pending_image_component[i].mic_build_time);
532 }
533
534 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
535 DEVNAME(sc),
536 sc->sc_info.mci_max_arms,
537 sc->sc_info.mci_max_spans,
538 sc->sc_info.mci_max_arrays,
539 sc->sc_info.mci_max_lds,
540 sc->sc_info.mci_product_name);
541
542 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
543 DEVNAME(sc),
544 sc->sc_info.mci_serial_number,
545 sc->sc_info.mci_hw_present,
546 sc->sc_info.mci_current_fw_time,
547 sc->sc_info.mci_max_cmds,
548 sc->sc_info.mci_max_sg_elements);
549
550 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
551 DEVNAME(sc),
552 sc->sc_info.mci_max_request_size,
553 sc->sc_info.mci_lds_present,
554 sc->sc_info.mci_lds_degraded,
555 sc->sc_info.mci_lds_offline,
556 sc->sc_info.mci_pd_present);
557
558 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
559 DEVNAME(sc),
560 sc->sc_info.mci_pd_disks_present,
561 sc->sc_info.mci_pd_disks_pred_failure,
562 sc->sc_info.mci_pd_disks_failed);
563
564 printf("%s: nvram %d mem %d flash %d\n",
565 DEVNAME(sc),
566 sc->sc_info.mci_nvram_size,
567 sc->sc_info.mci_memory_size,
568 sc->sc_info.mci_flash_size);
569
570 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
571 DEVNAME(sc),
572 sc->sc_info.mci_ram_correctable_errors,
573 sc->sc_info.mci_ram_uncorrectable_errors,
574 sc->sc_info.mci_cluster_allowed,
575 sc->sc_info.mci_cluster_active);
576
577 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
578 DEVNAME(sc),
579 sc->sc_info.mci_max_strips_per_io,
580 sc->sc_info.mci_raid_levels,
581 sc->sc_info.mci_adapter_ops,
582 sc->sc_info.mci_ld_ops);
583
584 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
585 DEVNAME(sc),
586 sc->sc_info.mci_stripe_sz_ops.min,
587 sc->sc_info.mci_stripe_sz_ops.max,
588 sc->sc_info.mci_pd_ops,
589 sc->sc_info.mci_pd_mix_support);
590
591 printf("%s: ecc_bucket %d pckg_prop %s\n",
592 DEVNAME(sc),
593 sc->sc_info.mci_ecc_bucket_count,
594 sc->sc_info.mci_package_version);
595
596 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
597 DEVNAME(sc),
598 sc->sc_info.mci_properties.mcp_seq_num,
599 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
600 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
601 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
602
603 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
604 DEVNAME(sc),
605 sc->sc_info.mci_properties.mcp_rebuild_rate,
606 sc->sc_info.mci_properties.mcp_patrol_read_rate,
607 sc->sc_info.mci_properties.mcp_bgi_rate,
608 sc->sc_info.mci_properties.mcp_cc_rate);
609
610 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
611 DEVNAME(sc),
612 sc->sc_info.mci_properties.mcp_recon_rate,
613 sc->sc_info.mci_properties.mcp_cache_flush_interval,
614 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
615 sc->sc_info.mci_properties.mcp_spinup_delay,
616 sc->sc_info.mci_properties.mcp_cluster_enable);
617
618 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
619 DEVNAME(sc),
620 sc->sc_info.mci_properties.mcp_coercion_mode,
621 sc->sc_info.mci_properties.mcp_alarm_enable,
622 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
623 sc->sc_info.mci_properties.mcp_disable_battery_warn,
624 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
625
626 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
627 DEVNAME(sc),
628 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
629 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
630 sc->sc_info.mci_properties.mcp_expose_encl_devices);
631
632 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
633 DEVNAME(sc),
634 sc->sc_info.mci_pci.mip_vendor,
635 sc->sc_info.mci_pci.mip_device,
636 sc->sc_info.mci_pci.mip_subvendor,
637 sc->sc_info.mci_pci.mip_subdevice);
638
639 printf("%s: type %#x port_count %d port_addr ",
640 DEVNAME(sc),
641 sc->sc_info.mci_host.mih_type,
642 sc->sc_info.mci_host.mih_port_count);
643
644 for (i = 0; i < 8; i++)
645 printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
646 printf("\n");
647
648 printf("%s: type %.x port_count %d port_addr ",
649 DEVNAME(sc),
650 sc->sc_info.mci_device.mid_type,
651 sc->sc_info.mci_device.mid_port_count);
652
653 for (i = 0; i < 8; i++)
654 printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
655 printf("\n");
656 #endif /* MFI_DEBUG */
657
658 return (0);
659 }
660
/*
 * Common attach path for all mfi(4) bus front-ends.  Selects the
 * per-IOP operations vector, brings the firmware to READY state,
 * sizes and allocates the reply queue, command frames and sense
 * buffers, initializes the ccb pool and the firmware queues, then
 * attaches the logical-disk SCSI bus (and the physical-disk bus on
 * IOPs flagged MFI_IOP_F_SYSPD), bio(4) and sensors.
 * Returns 0 on success, 1 on failure (allocations are unwound).
 */
int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
    struct scsibus_attach_args saa;
    uint32_t status, frames, max_sgl;
    int i;

    switch (iop) {
    case MFI_IOP_XSCALE:
        sc->sc_iop = &mfi_iop_xscale;
        break;
    case MFI_IOP_PPC:
        sc->sc_iop = &mfi_iop_ppc;
        break;
    case MFI_IOP_GEN2:
        sc->sc_iop = &mfi_iop_gen2;
        break;
    case MFI_IOP_SKINNY:
        sc->sc_iop = &mfi_iop_skinny;
        break;
    default:
        panic("%s: unknown iop %d", DEVNAME(sc), iop);
    }

    DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

    if (mfi_transition_firmware(sc))
        return (1);

    SLIST_INIT(&sc->sc_ccb_freeq);
    mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
    scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);

    rw_init(&sc->sc_lock, "mfi_lock");

    /* the firmware state register encodes command and SGL limits */
    status = mfi_fw_state(sc);
    sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
    max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
    if (sc->sc_64bit_dma) {
        sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
        sc->sc_sgl_size = sizeof(struct mfi_sg64);
        sc->sc_sgl_flags = MFI_FRAME_SGL64;
    } else {
        sc->sc_max_sgl = max_sgl;
        sc->sc_sgl_size = sizeof(struct mfi_sg32);
        sc->sc_sgl_flags = MFI_FRAME_SGL32;
    }
    if (iop == MFI_IOP_SKINNY)
        sc->sc_sgl_size = sizeof(struct mfi_sg_skinny);
    DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
        DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);

    /* consumer/producer and reply queue memory */
    sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
        sizeof(struct mfi_prod_cons));
    if (sc->sc_pcq == NULL) {
        printf("%s: unable to allocate reply queue memory\n",
            DEVNAME(sc));
        goto nopcq;
    }

    /* frame memory */
    /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
    frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
        MFI_FRAME_SIZE + 1;
    sc->sc_frames_size = frames * MFI_FRAME_SIZE;
    sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
    if (sc->sc_frames == NULL) {
        printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
        goto noframe;
    }
    /* XXX hack, fix this */
    if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
        printf("%s: improper frame alignment (%#lx) FIXME\n",
            DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
        goto noframe;
    }

    /* sense memory */
    sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
    if (sc->sc_sense == NULL) {
        printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
        goto nosense;
    }

    /* now that we have all memory bits go initialize ccbs */
    if (mfi_init_ccb(sc)) {
        printf("%s: could not init ccb list\n", DEVNAME(sc));
        goto noinit;
    }

    /* kickstart firmware with all addresses and pointers */
    if (mfi_initialize_firmware(sc)) {
        printf("%s: could not initialize firmware\n", DEVNAME(sc));
        goto noinit;
    }

    if (mfi_get_info(sc)) {
        printf("%s: could not retrieve controller information\n",
            DEVNAME(sc));
        goto noinit;
    }

    printf("%s: \"%s\", firmware %s", DEVNAME(sc),
        sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
    if (letoh16(sc->sc_info.mci_memory_size) > 0)
        printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
    printf("\n");

    sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
    for (i = 0; i < sc->sc_ld_cnt; i++)
        sc->sc_ld[i].ld_present = 1;

    /* attach the logical-disk SCSI bus */
    saa.saa_adapter = &mfi_switch;
    saa.saa_adapter_softc = sc;
    saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
    saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
    saa.saa_luns = 1;
    saa.saa_openings = sc->sc_max_cmds - 1;
    saa.saa_pool = &sc->sc_iopool;
    saa.saa_quirks = saa.saa_flags = 0;
    saa.saa_wwpn = saa.saa_wwnn = 0;

    sc->sc_scsibus = (struct scsibus_softc *)
        config_found(&sc->sc_dev, &saa, scsiprint);

    if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
        mfi_syspd(sc);

    /* enable interrupts */
    mfi_intr_enable(sc);

#if NBIO > 0
    if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
        panic("%s: controller registration failed", DEVNAME(sc));
    else
        sc->sc_ioctl = mfi_ioctl;

#ifndef SMALL_KERNEL
    if (mfi_create_sensors(sc) != 0)
        printf("%s: unable to create sensors\n", DEVNAME(sc));
#endif
#endif /* NBIO > 0 */

    return (0);
noinit:
    mfi_freemem(sc, sc->sc_sense);
nosense:
    mfi_freemem(sc, sc->sc_frames);
noframe:
    mfi_freemem(sc, sc->sc_pcq);
nopcq:
    return (1);
}
815
816 int
817 mfi_syspd(struct mfi_softc *sc)
818 {
819 struct scsibus_attach_args saa;
820 struct mfi_pd_link *pl;
821 struct mfi_pd_list *pd;
822 u_int npds, i;
823
824 sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
825 if (sc->sc_pd == NULL)
826 return (1);
827
828 pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
829 if (pd == NULL)
830 goto nopdsc;
831
832 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
833 sizeof(*pd), pd, NULL) != 0)
834 goto nopd;
835
836 npds = letoh32(pd->mpl_no_pd);
837 for (i = 0; i < npds; i++) {
838 pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
839 if (pl == NULL)
840 goto nopl;
841
842 pl->pd_id = pd->mpl_address[i].mpa_pd_id;
843 sc->sc_pd->pd_links[i] = pl;
844 }
845
846 free(pd, M_TEMP, sizeof *pd);
847
848 saa.saa_adapter = &mfi_pd_switch;
849 saa.saa_adapter_softc = sc;
850 saa.saa_adapter_buswidth = MFI_MAX_PD;
851 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
852 saa.saa_luns = 8;
853 saa.saa_openings = sc->sc_max_cmds - 1;
854 saa.saa_pool = &sc->sc_iopool;
855 saa.saa_quirks = saa.saa_flags = 0;
856 saa.saa_wwpn = saa.saa_wwnn = 0;
857
858 sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
859 config_found(&sc->sc_dev, &saa, scsiprint);
860
861 return (0);
862 nopl:
863 for (i = 0; i < npds; i++) {
864 pl = sc->sc_pd->pd_links[i];
865 if (pl == NULL)
866 break;
867
868 free(pl, M_DEVBUF, sizeof *pl);
869 }
870 nopd:
871 free(pd, M_TEMP, sizeof *pd);
872 nopdsc:
873 free(sc->sc_pd, M_DEVBUF, sizeof *sc->sc_pd);
874 return (1);
875 }
876
/*
 * Submit a ccb and busy-wait for completion without interrupts: the
 * frame's status byte is polled every 1ms for up to 5000 iterations.
 * On timeout the ccb is flagged MFI_CCB_F_ERR.  Any data map is
 * synced and unloaded, then the ccb's done callback is invoked
 * before returning.
 */
void
mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
    struct mfi_frame_header *hdr;
    int to = 0;

    DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

    hdr = &ccb->ccb_frame->mfr_header;
    /* 0xff marks the command as outstanding; firmware overwrites it */
    hdr->mfh_cmd_status = 0xff;
    hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    mfi_start(sc, ccb);

    for (;;) {
        delay(1000);

        bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
            ccb->ccb_pframe_offset, sc->sc_frames_size,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        if (hdr->mfh_cmd_status != 0xff)
            break;

        if (to++ > 5000) {
            printf("%s: timeout on ccb %d\n", DEVNAME(sc),
                hdr->mfh_context);
            ccb->ccb_flags |= MFI_CCB_F_ERR;
            break;
        }

        /* hand the frame back to the device for the next check */
        bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
            ccb->ccb_pframe_offset, sc->sc_frames_size,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    }

    if (ccb->ccb_len > 0) {
        bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
            ccb->ccb_dmamap->dm_mapsize,
            (ccb->ccb_direction & MFI_DATA_IN) ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
    }

    ccb->ccb_done(sc, ccb);
}
924
/*
 * Submit a ccb and sleep until it completes.  A stack-local mutex is
 * passed through ccb_cookie; mfi_exec_done() clears the cookie and
 * wakes us.  The ccb must arrive with cookie and done unset.
 */
void
mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
    struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
    if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
        panic("mfi_exec called with cookie or done set");
#endif

    ccb->ccb_cookie = &m;
    ccb->ccb_done = mfi_exec_done;

    mfi_start(sc, ccb);

    /* wait for mfi_exec_done() to clear the cookie */
    mtx_enter(&m);
    while (ccb->ccb_cookie != NULL)
        msleep_nsec(ccb, &m, PRIBIO, "mfiexec", INFSLP);
    mtx_leave(&m);
}
945
/*
 * Completion callback for mfi_exec(): under the submitter's mutex,
 * clear the cookie (the wait condition) and wake the sleeper.
 */
void
mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
    struct mutex *m = ccb->ccb_cookie;

    mtx_enter(m);
    ccb->ccb_cookie = NULL;
    wakeup_one(ccb);
    mtx_leave(m);
}
956
/*
 * Interrupt handler: drain completed command contexts from the reply
 * ring in sc_pcq (between the firmware's producer index and our
 * consumer index) and call mfi_done() for each.  Returns 1 if any
 * completion was claimed, 0 if the interrupt was not ours.
 */
int
mfi_intr(void *arg)
{
    struct mfi_softc *sc = arg;
    struct mfi_prod_cons *pcq = MFIMEM_KVA(sc->sc_pcq);
    struct mfi_ccb *ccb;
    uint32_t producer, consumer, ctx;
    int claimed = 0;

    if (!mfi_my_intr(sc))
        return (0);

    bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
        0, MFIMEM_LEN(sc->sc_pcq),
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    producer = letoh32(pcq->mpc_producer);
    consumer = letoh32(pcq->mpc_consumer);

    DNPRINTF(MFI_D_INTR, "%s: mfi_intr %p %p\n", DEVNAME(sc), sc, pcq);

    while (consumer != producer) {
        DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
            DEVNAME(sc), producer, consumer);

        /* take the context out of the slot and mark it consumed */
        ctx = pcq->mpc_reply_q[consumer];
        pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
        if (ctx == MFI_INVALID_CTX)
            printf("%s: invalid context, p: %d c: %d\n",
                DEVNAME(sc), producer, consumer);
        else {
            /* XXX remove from queue and call scsi_done */
            ccb = &sc->sc_ccb[ctx];
            DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
                DEVNAME(sc), ctx);
            mfi_done(sc, ccb);

            claimed = 1;
        }
        /* the ring holds sc_max_cmds + 1 slots; wrap accordingly */
        consumer++;
        if (consumer == (sc->sc_max_cmds + 1))
            consumer = 0;
    }

    pcq->mpc_consumer = htole32(consumer);

    bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
        0, MFIMEM_LEN(sc->sc_pcq),
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (claimed);
}
1009
/*
 * Build a logical-disk READ/WRITE frame for xs covering blockcnt
 * blocks starting at blockno, and map the data buffer into a
 * scatter/gather list.  Returns 0 on success, 1 if there is no data
 * buffer or the SGL could not be created.
 */
int
mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
    struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
{
    struct scsi_link *link = xs->sc_link;
    struct mfi_io_frame *io;

    DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
        DEVNAME((struct mfi_softc *)link->bus->sb_adapter_softc), link->target);

    /* an I/O command without a data buffer makes no sense */
    if (!xs->data)
        return (1);

    io = &ccb->ccb_frame->mfr_io;
    if (xs->flags & SCSI_DATA_IN) {
        io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
        ccb->ccb_direction = MFI_DATA_IN;
    } else {
        io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
        ccb->ccb_direction = MFI_DATA_OUT;
    }
    io->mif_header.mfh_target_id = link->target;
    io->mif_header.mfh_timeout = 0;
    io->mif_header.mfh_flags = 0;
    io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
    /* for I/O frames data_len is in blocks, not bytes */
    io->mif_header.mfh_data_len = htole32(blockcnt);
    io->mif_lba = htole64(blockno);
    io->mif_sense_addr = htole64(ccb->ccb_psense);

    ccb->ccb_done = mfi_scsi_xs_done;
    ccb->ccb_cookie = xs;
    ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
    ccb->ccb_sgl = &io->mif_sgl;
    ccb->ccb_data = xs->data;
    ccb->ccb_len = xs->datalen;

    if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
        BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
        return (1);

    return (0);
}
1052
/*
 * Completion callback for SCSI transfers: translate the firmware
 * command status into scsi_xfer error codes, copying sense data
 * where the firmware provided it, then hand the xfer back to the
 * midlayer under the kernel lock.
 */
void
mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
    struct scsi_xfer *xs = ccb->ccb_cookie;
    struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;

    DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %p %p\n",
        DEVNAME(sc), ccb, ccb->ccb_frame);

    switch (hdr->mfh_cmd_status) {
    case MFI_STAT_OK:
        xs->resid = 0;
        break;

    case MFI_STAT_SCSI_DONE_WITH_ERROR:
        /* command completed but the target reported sense data */
        xs->error = XS_SENSE;
        xs->resid = 0;
        memset(&xs->sense, 0, sizeof(xs->sense));
        memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
        break;

    case MFI_STAT_DEVICE_NOT_FOUND:
        xs->error = XS_SELTIMEOUT;
        break;

    default:
        xs->error = XS_DRIVER_STUFFUP;
        DNPRINTF(MFI_D_CMD,
            "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
            DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd.opcode);

        /* sense data may still be present on unexpected statuses */
        if (hdr->mfh_scsi_status != 0) {
            DNPRINTF(MFI_D_INTR,
                "%s: mfi_scsi_xs_done sense %#x %p %p\n",
                DEVNAME(sc), hdr->mfh_scsi_status,
                &xs->sense, ccb->ccb_sense);
            memset(&xs->sense, 0, sizeof(xs->sense));
            memcpy(&xs->sense, ccb->ccb_sense,
                sizeof(struct scsi_sense_data));
            xs->error = XS_SENSE;
        }
        break;
    }

    KERNEL_LOCK();
    scsi_done(xs);
    KERNEL_UNLOCK();
}
1101
/*
 * Build a SCSI passthrough frame for non-R/W commands addressed to a
 * logical disk.  Returns 0 on success, 1 if the SGL could not be set
 * up; the caller maps a nonzero return to XS_DRIVER_STUFFUP.
 */
int
mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfi_pass_frame *pf;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    DEVNAME((struct mfi_softc *)link->bus->sb_adapter_softc), link->target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = link->target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr = htole64(ccb->ccb_psense);

	/* copy the CDB into the frame, zero-padding to the full 16 bytes */
	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmd, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	/* direction follows the xfer flags; control commands carry none */
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return (1);
	}

	return (0);
}
1147
1148 void
1149 mfi_scsi_cmd(struct scsi_xfer *xs)
1150 {
1151 struct scsi_link *link = xs->sc_link;
1152 struct mfi_softc *sc = link->bus->sb_adapter_softc;
1153 struct mfi_ccb *ccb = xs->io;
1154 struct scsi_rw *rw;
1155 struct scsi_rw_10 *rw10;
1156 struct scsi_rw_16 *rw16;
1157 uint64_t blockno;
1158 uint32_t blockcnt;
1159 uint8_t target = link->target;
1160 union mfi_mbox mbox;
1161
1162 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1163 DEVNAME(sc), xs->cmd.opcode);
1164
1165 KERNEL_UNLOCK();
1166
1167 if (!sc->sc_ld[target].ld_present) {
1168 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1169 DEVNAME(sc), target);
1170 goto stuffup;
1171 }
1172
1173 mfi_scrub_ccb(ccb);
1174
1175 xs->error = XS_NOERROR;
1176
1177 switch (xs->cmd.opcode) {
1178 /* IO path */
1179 case READ_10:
1180 case WRITE_10:
1181 rw10 = (struct scsi_rw_10 *)&xs->cmd;
1182 blockno = (uint64_t)_4btol(rw10->addr);
1183 blockcnt = _2btol(rw10->length);
1184 if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1185 goto stuffup;
1186 break;
1187
1188 case READ_COMMAND:
1189 case WRITE_COMMAND:
1190 rw = (struct scsi_rw *)&xs->cmd;
1191 blockno =
1192 (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1193 blockcnt = rw->length ? rw->length : 0x100;
1194 if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1195 goto stuffup;
1196 break;
1197
1198 case READ_16:
1199 case WRITE_16:
1200 rw16 = (struct scsi_rw_16 *)&xs->cmd;
1201 blockno = _8btol(rw16->addr);
1202 blockcnt = _4btol(rw16->length);
1203 if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1204 goto stuffup;
1205 break;
1206
1207 case SYNCHRONIZE_CACHE:
1208 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1209 if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1210 MFI_DATA_NONE, 0, NULL, &mbox))
1211 goto stuffup;
1212
1213 goto complete;
1214 /* NOTREACHED */
1215
1216 default:
1217 if (mfi_scsi_ld(sc, ccb, xs))
1218 goto stuffup;
1219 break;
1220 }
1221
1222 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1223
1224 if (xs->flags & SCSI_POLL)
1225 mfi_poll(sc, ccb);
1226 else
1227 mfi_start(sc, ccb);
1228
1229 KERNEL_LOCK();
1230 return;
1231
1232 stuffup:
1233 xs->error = XS_DRIVER_STUFFUP;
1234 complete:
1235 KERNEL_LOCK();
1236 scsi_done(xs);
1237 }
1238
/*
 * Copy the DMA segments of the ccb's loaded map into the frame's
 * scatter/gather list, using the 32- or 64-bit entry format depending
 * on the controller.  Returns the number of bytes the SGL added to
 * the frame.
 */
u_int
mfi_default_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	union mfi_sgl *sgl = ccb->ccb_sgl;
	bus_dma_segment_t *sgd = ccb->ccb_dmamap->dm_segs;
	int i;

	/* advertise the SGL entry format the controller expects */
	hdr->mfh_flags |= sc->sc_sgl_flags;

	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
		if (sc->sc_64bit_dma) {
			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg64[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#llx len: %#x\n",
			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
		} else {
			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
			sgl->sg32[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
		}
	}

	return (ccb->ccb_dmamap->dm_nsegs *
	    (sc->sc_64bit_dma ? sizeof(sgl->sg64) : sizeof(sgl->sg32)));
}
1266
/*
 * Load ccb_data into the ccb's DMA map, emit the SGL into the frame
 * and sync the map for the transfer direction.  Also accounts for the
 * extra frames the SGL spills into.  Returns 0 on success, 1 on any
 * failure (including a NULL data pointer).
 */
int
mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
{
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	int error;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %p\n", DEVNAME(sc),
	    ccb->ccb_data);

	if (!ccb->ccb_data) {
		hdr->mfh_sg_count = 0;
		return (1);
	}

	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG)
			printf("more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			printf("error %d loading dma map\n", error);
		return (1);
	}

	/* append the SGL to the frame; returns its size in bytes */
	ccb->ccb_frame_size += mfi_sgd_load(sc, ccb);

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	/* a long SGL chains into additional MFI_FRAME_SIZE frames */
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
	    " dm_nsegs: %d extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return (0);
}
1318
1319 int
1320 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1321 void *buf, const union mfi_mbox *mbox)
1322 {
1323 struct mfi_ccb *ccb;
1324 int rv;
1325
1326 ccb = scsi_io_get(&sc->sc_iopool, 0);
1327 mfi_scrub_ccb(ccb);
1328 rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1329 scsi_io_put(&sc->sc_iopool, ccb);
1330
1331 return (rv);
1332 }
1333
/*
 * Issue a DCMD management command using the caller's (already
 * scrubbed) ccb.  Payload data, if any, is bounced through a freshly
 * allocated DMA-safe buffer.  The command is polled when cold,
 * otherwise executed sleeping via mfi_exec().  Returns 0 on success,
 * ENXIO when the firmware reports MFI_STAT_WRONG_STATE, EIO on other
 * firmware errors, or EINVAL on setup failure.
 */
int
mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
    uint32_t dir, uint32_t len, void *buf, const union mfi_mbox *mbox)
{
	struct mfi_dcmd_frame *dcmd;
	uint8_t *dma_buf = NULL;
	int rv = EINVAL;

	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);

	/*
	 * NOTE(review): this is reached with len == 0 for MFI_DATA_NONE
	 * commands, so it relies on dma_alloc() accepting a zero-size
	 * request -- confirm before reusing the pattern elsewhere.
	 */
	dma_buf = dma_alloc(len, cold ? PR_NOWAIT : PR_WAITOK);
	if (dma_buf == NULL)
		goto done;

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(&dcmd->mdf_mbox, 0, sizeof(dcmd->mdf_mbox));
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	if (dir != MFI_DATA_NONE) {
		/* outbound payloads are staged into the bounce buffer */
		if (dir == MFI_DATA_OUT)
			memcpy(dma_buf, buf, len);
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = dma_buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(sc, ccb, cold ? BUS_DMA_NOWAIT :
		    BUS_DMA_WAITOK)) {
			rv = EINVAL;
			goto done;
		}
	}

	if (cold) {
		/* no interrupts or sleeping yet: poll for completion */
		ccb->ccb_done = mfi_empty_done;
		mfi_poll(sc, ccb);
	} else
		mfi_exec(sc, ccb);

	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
		if (dcmd->mdf_header.mfh_cmd_status == MFI_STAT_WRONG_STATE)
			rv = ENXIO;
		else
			rv = EIO;
		goto done;
	}

	/* inbound payloads are copied back out of the bounce buffer */
	if (dir == MFI_DATA_IN)
		memcpy(buf, dma_buf, len);

	rv = 0;
done:
	if (dma_buf)
		dma_free(dma_buf, len);

	return (rv);
}
1402
1403 int
1404 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1405 {
1406 struct mfi_softc *sc = link->bus->sb_adapter_softc;
1407
1408 DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1409
1410 switch (cmd) {
1411 case DIOCGCACHE:
1412 case DIOCSCACHE:
1413 return (mfi_ioctl_cache(link, cmd, (struct dk_cache *)addr));
1414 break;
1415
1416 default:
1417 if (sc->sc_ioctl)
1418 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
1419 break;
1420 }
1421
1422 return (ENOTTY);
1423 }
1424
/*
 * DIOCGCACHE/DIOCSCACHE backend: get or set the cache policy of the
 * logical disk behind link.  Controllers with cache memory use the
 * controller cache policy bits; those without fall back to the
 * physical disks' write cache, and read caching is unavailable.
 */
int
mfi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mfi_softc *sc = link->bus->sb_adapter_softc;
	int rv, wrenable, rdenable;
	struct mfi_ld_prop ldp;
	union mfi_mbox mbox;

	if (mfi_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	if (!sc->sc_ld[link->target].ld_present) {
		rv = EIO;
		goto done;
	}

	/* fetch the current LD properties; mbox.b[0] selects the LD */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, MFI_DATA_IN,
	    sizeof(ldp), &ldp, &mbox)) != 0)
		goto done;

	/* with cache memory report controller policy, else disk cache */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		/* rv is 0 here from the successful GET_PROPERTIES above */
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do when the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* SET_PROPERTIES mbox: target, reserved byte and sequence number */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfi_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, MFI_DATA_OUT, sizeof(ldp),
	    &ldp, &mbox);

done:
	return (rv);
}
1505
1506 #if NBIO > 0
1507 int
1508 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1509 {
1510 struct mfi_softc *sc = (struct mfi_softc *)dev;
1511 int error = 0;
1512
1513 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1514
1515 rw_enter_write(&sc->sc_lock);
1516
1517 switch (cmd) {
1518 case BIOCINQ:
1519 DNPRINTF(MFI_D_IOCTL, "inq\n");
1520 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1521 break;
1522
1523 case BIOCVOL:
1524 DNPRINTF(MFI_D_IOCTL, "vol\n");
1525 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1526 break;
1527
1528 case BIOCDISK:
1529 DNPRINTF(MFI_D_IOCTL, "disk\n");
1530 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1531 break;
1532
1533 case BIOCALARM:
1534 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1535 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1536 break;
1537
1538 case BIOCBLINK:
1539 DNPRINTF(MFI_D_IOCTL, "blink\n");
1540 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1541 break;
1542
1543 case BIOCSETSTATE:
1544 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1545 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1546 break;
1547
1548 case BIOCPATROL:
1549 DNPRINTF(MFI_D_IOCTL, "patrol\n");
1550 error = mfi_ioctl_patrol(sc, (struct bioc_patrol *)addr);
1551 break;
1552
1553 default:
1554 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1555 error = ENOTTY;
1556 }
1557
1558 rw_exit_write(&sc->sc_lock);
1559
1560 return (error);
1561 }
1562
1563 int
1564 mfi_bio_getitall(struct mfi_softc *sc)
1565 {
1566 int i, d, size, rv = EINVAL;
1567 union mfi_mbox mbox;
1568 struct mfi_conf *cfg = NULL;
1569 struct mfi_ld_details *ld_det = NULL;
1570
1571 /* get info */
1572 if (mfi_get_info(sc)) {
1573 DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1574 DEVNAME(sc));
1575 goto done;
1576 }
1577
1578 /* send single element command to retrieve size for full structure */
1579 cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1580 if (cfg == NULL)
1581 goto done;
1582 if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1583 NULL)) {
1584 free(cfg, M_DEVBUF, sizeof *cfg);
1585 goto done;
1586 }
1587
1588 size = cfg->mfc_size;
1589 free(cfg, M_DEVBUF, sizeof *cfg);
1590
1591 /* memory for read config */
1592 cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1593 if (cfg == NULL)
1594 goto done;
1595 if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
1596 free(cfg, M_DEVBUF, size);
1597 goto done;
1598 }
1599
1600 /* replace current pointer with new one */
1601 if (sc->sc_cfg)
1602 free(sc->sc_cfg, M_DEVBUF, 0);
1603 sc->sc_cfg = cfg;
1604
1605 /* get all ld info */
1606 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1607 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1608 goto done;
1609
1610 /* get memory for all ld structures */
1611 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1612 if (sc->sc_ld_sz != size) {
1613 if (sc->sc_ld_details)
1614 free(sc->sc_ld_details, M_DEVBUF, 0);
1615
1616 ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1617 if (ld_det == NULL)
1618 goto done;
1619 sc->sc_ld_sz = size;
1620 sc->sc_ld_details = ld_det;
1621 }
1622
1623 /* find used physical disks */
1624 size = sizeof(struct mfi_ld_details);
1625 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1626 memset(&mbox, 0, sizeof(mbox));
1627 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1628 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1629 &sc->sc_ld_details[i], &mbox))
1630 goto done;
1631
1632 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1633 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1634 }
1635 sc->sc_no_pd = d;
1636
1637 rv = 0;
1638 done:
1639 return (rv);
1640 }
1641
1642 int
1643 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1644 {
1645 int rv = EINVAL;
1646 struct mfi_conf *cfg = NULL;
1647
1648 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1649
1650 if (mfi_bio_getitall(sc)) {
1651 DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1652 DEVNAME(sc));
1653 goto done;
1654 }
1655
1656 /* count unused disks as volumes */
1657 if (sc->sc_cfg == NULL)
1658 goto done;
1659 cfg = sc->sc_cfg;
1660
1661 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1662 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1663 #if notyet
1664 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1665 (bi->bi_nodisk - sc->sc_no_pd);
1666 #endif
1667 /* tell bio who we are */
1668 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1669
1670 rv = 0;
1671 done:
1672 return (rv);
1673 }
1674
/*
 * BIOCVOL backend: fill bv with the state of logical volume bv_volid.
 * Volume ids at or past the LD list are mapped onto hotspares via
 * mfi_bio_hs().
 */
int
mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;
	struct scsi_link *link;
	struct device *dev;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* report the attached disk device's name, if one is attached */
	i = bv->bv_volid;
	link = scsi_get_link(sc->sc_scsibus, i, 0);
	if (link != NULL && link->device_softc != NULL) {
		dev = link->device_softc;
		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check: progress scales over 0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background init is reported as a scrub as well */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
	    MFI_DDF_SRL_SPANNED)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
1773
/*
 * BIOCDISK backend: report the state of one physical disk that is a
 * member of logical volume bd_volid (hotspares are handled by
 * mfi_bio_hs() for out-of-range volume ids).
 */
int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_progress *mfp;
	struct mfi_progress *mp;
	struct scsi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;

	/* get status */
	switch (ar[arr].pd[disk].mar_pd_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	/* get the remaining fields */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, &mbox)) {
		/* disk is missing but succeed command */
		rv = 0;
		goto freeme;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	/* vend[] holds the INQUIRY vendor/product/revision strings */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	/* report patrol read progress if one is running on this disk */
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
1892
1893 int
1894 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1895 {
1896 uint32_t opc, dir = MFI_DATA_NONE;
1897 int rv = 0;
1898 int8_t ret;
1899
1900 switch(ba->ba_opcode) {
1901 case BIOC_SADISABLE:
1902 opc = MR_DCMD_SPEAKER_DISABLE;
1903 break;
1904
1905 case BIOC_SAENABLE:
1906 opc = MR_DCMD_SPEAKER_ENABLE;
1907 break;
1908
1909 case BIOC_SASILENCE:
1910 opc = MR_DCMD_SPEAKER_SILENCE;
1911 break;
1912
1913 case BIOC_GASTATUS:
1914 opc = MR_DCMD_SPEAKER_GET;
1915 dir = MFI_DATA_IN;
1916 break;
1917
1918 case BIOC_SATEST:
1919 opc = MR_DCMD_SPEAKER_TEST;
1920 break;
1921
1922 default:
1923 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1924 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1925 return (EINVAL);
1926 }
1927
1928 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1929 rv = EINVAL;
1930 else
1931 if (ba->ba_opcode == BIOC_GASTATUS)
1932 ba->ba_status = ret;
1933 else
1934 ba->ba_status = 0;
1935
1936 return (rv);
1937 }
1938
/*
 * BIOCBLINK backend: locate the physical disk by enclosure index and
 * slot, then ask the firmware to start or stop blinking its locate
 * LED.
 */
int
mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL))
		goto done;

	/* find the PD matching the requested enclosure/slot pair */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	/* the firmware addresses the disk by its PD id in mbox.s[0] */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	rv = mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, &mbox);

done:
	free(pd, M_DEVBUF, sizeof *pd);
	return (rv);
}
1996
/*
 * BIOCSETSTATE backend: look up the physical disk by enclosure/slot
 * and ask the firmware to move it to the requested state (online,
 * offline, hotspare or rebuild).
 */
int
mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_list *pd;
	struct mfi_pd_details *info;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
	info = malloc(sizeof *info, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL))
		goto done;

	/* find the PD matching the requested enclosure/slot pair */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	/* fetch the PD details to obtain its current sequence number */
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *info, info, &mbox))
		goto done;

	/* SET_STATE mbox: s[0] = pd id, s[1] = seq, b[4] = new state */
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
	mbox.s[1] = info->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}

	rv = mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, &mbox);

done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(info, M_DEVBUF, sizeof *info);
	return (rv);
}
2065
/*
 * BIOCPATROL backend: start/stop a patrol read, switch its mode
 * (manual, disabled, or auto with interval/next-run), or report the
 * current mode and state.  Schedule times are exchanged with the
 * firmware in controller seconds via MR_DCMD_TIME_SECS_GET.
 */
int
mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc, dir = MFI_DATA_NONE;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): dir is MFI_DATA_IN here despite a zero
		 * length and NULL buffer; MFI_DATA_NONE looks intended
		 * -- confirm against firmware behavior before changing.
		 */
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, 0, NULL, NULL))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		/* modify the fetched properties, then write them back */
		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		dir = MFI_DATA_OUT;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(status), &status, NULL))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		dir = MFI_DATA_IN;
		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
			return (EINVAL);

		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
2201
2202 int
2203 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2204 {
2205 struct mfi_conf *cfg;
2206 struct mfi_hotspare *hs;
2207 struct mfi_pd_details *pd;
2208 struct bioc_disk *sdhs;
2209 struct bioc_vol *vdhs;
2210 struct scsi_inquiry_data *inqbuf;
2211 char vend[8+16+4+1], *vendp;
2212 int i, rv = EINVAL;
2213 uint32_t size;
2214 union mfi_mbox mbox;
2215
2216 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2217
2218 if (!bio_hs)
2219 return (EINVAL);
2220
2221 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
2222
2223 /* send single element command to retrieve size for full structure */
2224 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2225 if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
2226 goto freeme;
2227
2228 size = cfg->mfc_size;
2229 free(cfg, M_DEVBUF, sizeof *cfg);
2230
2231 /* memory for read config */
2232 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2233 if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
2234 goto freeme;
2235
2236 /* calculate offset to hs structure */
2237 hs = (struct mfi_hotspare *)(
2238 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2239 cfg->mfc_array_size * cfg->mfc_no_array +
2240 cfg->mfc_ld_size * cfg->mfc_no_ld);
2241
2242 if (volid < cfg->mfc_no_ld)
2243 goto freeme; /* not a hotspare */
2244
2245 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
2246 goto freeme; /* not a hotspare */
2247
2248 /* offset into hotspare structure */
2249 i = volid - cfg->mfc_no_ld;
2250
2251 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2252 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2253 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2254
2255 /* get pd fields */
2256 memset(&mbox, 0, sizeof(mbox));
2257 mbox.s[0] = hs[i].mhs_pd.mfp_id;
2258 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2259 sizeof *pd, pd, &mbox)) {
2260 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2261 DEVNAME(sc));
2262 goto freeme;
2263 }
2264
2265 switch (type) {
2266 case MFI_MGMT_VD:
2267 vdhs = bio_hs;
2268 vdhs->bv_status = BIOC_SVONLINE;
2269 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2270 vdhs->bv_level = -1; /* hotspare */
2271 vdhs->bv_nodisk = 1;
2272 break;
2273
2274 case MFI_MGMT_SD:
2275 sdhs = bio_hs;
2276 sdhs->bd_status = BIOC_SDHOTSPARE;
2277 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2278 sdhs->bd_channel = pd->mpd_enc_idx;
2279 sdhs->bd_target = pd->mpd_enc_slot;
2280 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
2281 vendp = inqbuf->vendor;
2282 memcpy(vend, vendp, sizeof vend - 1);
2283 vend[sizeof vend - 1] = '\0';
2284 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2285 break;
2286
2287 default:
2288 goto freeme;
2289 }
2290
2291 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2292 rv = 0;
2293 freeme:
2294 free(pd, M_DEVBUF, sizeof *pd);
2295 free(cfg, M_DEVBUF, 0);
2296
2297 return (rv);
2298 }
2299
2300 #ifndef SMALL_KERNEL
2301
/*
 * Human-readable descriptions for the per-bit battery status
 * indicators; entry i corresponds to bit i of the BBU fw_status
 * word (see mfi_bbu() below).
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};

/* fixed BBU sensors: health indicator, volts, amps, temperature */
#define MFI_BBU_SENSORS 4
2319
/*
 * Refresh the BBU (battery backup unit) sensors from firmware.
 * Returns 0 if the sensors were updated (including the "no battery"
 * case), -1 if the status query itself failed.
 */
int
mfi_bbu(struct mfi_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
	    sizeof(bbu), &bbu, NULL) != 0) {
		/* query failed: mark every BBU sensor unknown */
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return (-1);
	}

	/*
	 * Select the "battery bad" bitmask for this battery type; BBU
	 * class batteries additionally report a state-of-health flag.
	 */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no battery present: flag the health indicator critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return (0);
	}

	status = letoh32(bbu.fw_status);

	/* sensor 0: overall battery health indicator */
	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/*
	 * Scale raw firmware values for the sensors framework; voltage
	 * and current are presumably reported in mV/mA (scaled here to
	 * uV/uA -- TODO confirm against firmware docs), temperature is
	 * converted from degC to uK (+273.15 C offset).
	 */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	/* one indicator sensor per fw_status bit */
	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}

	return (0);
}
2386
2387 int
2388 mfi_create_sensors(struct mfi_softc *sc)
2389 {
2390 struct device *dev;
2391 struct scsi_link *link;
2392 int i;
2393
2394 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2395 sizeof(sc->sc_sensordev.xname));
2396
2397 if (ISSET(letoh32(sc->sc_info.mci_adapter_ops ), MFI_INFO_AOPS_BBU)) {
2398 sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
2399 M_DEVBUF, M_WAITOK | M_ZERO);
2400
2401 sc->sc_bbu[0].type = SENSOR_INDICATOR;
2402 sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2403 strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2404 sizeof(sc->sc_bbu[0].desc));
2405 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2406
2407 sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2408 sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2409 sc->sc_bbu[2].type = SENSOR_AMPS;
2410 sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2411 sc->sc_bbu[3].type = SENSOR_TEMP;
2412 sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2413 for (i = 1; i < MFI_BBU_SENSORS; i++) {
2414 strlcpy(sc->sc_bbu[i].desc, "bbu",
2415 sizeof(sc->sc_bbu[i].desc));
2416 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2417 }
2418
2419 sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
2420 sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
2421
2422 for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2423 sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2424 sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2425 strlcpy(sc->sc_bbu_status[i].desc,
2426 mfi_bbu_indicators[i],
2427 sizeof(sc->sc_bbu_status[i].desc));
2428
2429 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2430 }
2431 }
2432
2433 sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2434 M_DEVBUF, M_NOWAIT | M_ZERO);
2435 if (sc->sc_sensors == NULL)
2436 return (1);
2437
2438 for (i = 0; i < sc->sc_ld_cnt; i++) {
2439 link = scsi_get_link(sc->sc_scsibus, i, 0);
2440 if (link == NULL)
2441 goto bad;
2442
2443 dev = link->device_softc;
2444
2445 sc->sc_sensors[i].type = SENSOR_DRIVE;
2446 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2447
2448 strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2449 sizeof(sc->sc_sensors[i].desc));
2450
2451 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2452 }
2453
2454 if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2455 goto bad;
2456
2457 sensordev_install(&sc->sc_sensordev);
2458
2459 return (0);
2460
2461 bad:
2462 free(sc->sc_sensors, M_DEVBUF,
2463 sc->sc_ld_cnt * sizeof(struct ksensor));
2464
2465 return (1);
2466 }
2467
/*
 * Periodic sensor-task callback: refresh the BBU sensors (if any)
 * and map each logical volume's bioc status onto its drive sensor.
 * Bails out early if the BBU or a volume query fails.
 */
void
mfi_refresh_sensors(void *arg)
{
	struct mfi_softc *sc = arg;
	int i, rv;
	struct bioc_vol bv;

	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
		return;

	for (i = 0; i < sc->sc_ld_cnt; i++) {
		bzero(&bv, sizeof(bv));
		bv.bv_volid = i;

		/* mfi_ioctl_vol() is serialized under sc_lock */
		rw_enter_write(&sc->sc_lock);
		rv = mfi_ioctl_vol(sc, &bv);
		rw_exit_write(&sc->sc_lock);

		if (rv != 0)
			return;

		switch(bv.bv_status) {
		case BIOC_SVOFFLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[i].status = SENSOR_S_CRIT;
			break;

		case BIOC_SVDEGRADED:
			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;

		case BIOC_SVSCRUB:
		case BIOC_SVONLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[i].status = SENSOR_S_OK;
			break;

		case BIOC_SVINVALID:
			/* FALLTHROUGH */
		default:
			sc->sc_sensors[i].value = 0; /* unknown */
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
			break;
		}
	}
}
2515 #endif /* SMALL_KERNEL */
2516 #endif /* NBIO > 0 */
2517
/*
 * Hand a prepared ccb to the adapter: flush the frame memory to the
 * device, then post via the controller-specific post routine.
 */
void
mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mfi_post(sc, ccb);
}
2527
/*
 * Command completion: sync the frame memory back from the device,
 * tear down the data DMA map (if the command carried data), then
 * invoke the ccb's completion callback.
 */
void
mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
2546
/* xscale: firmware state lives in outbound message register 0 */
u_int32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OMSG0));
}
2552
/* xscale: unmask interrupts via the outbound interrupt mask register */
void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2558
/*
 * xscale interrupt handler: returns 1 if the interrupt was ours
 * (and acknowledges it), 0 otherwise.
 */
int
mfi_xscale_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2573
/*
 * xscale: post a command.  The frame address is 8-byte aligned, so
 * the low 3 bits of the shifted address carry the extra frame count.
 */
void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}
2580
/* ppc: firmware state lives in the outbound scratch pad register */
u_int32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2586
/*
 * ppc: clear any pending doorbell state, then unmask interrupts.
 * NOTE(review): ~0x80000004 presumably leaves only the ppc interrupt
 * bits enabled -- magic value carried over from the vendor driver.
 */
void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2593
/*
 * ppc interrupt handler: returns 1 if the interrupt was ours (and
 * acknowledges it via the doorbell clear register), 0 otherwise.
 */
int
mfi_ppc_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2608
/*
 * ppc: post a command.  Bit 0 is set and the extra frame count is
 * encoded in the low bits alongside the frame's physical address.
 */
void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2615
/* gen2: firmware state lives in the outbound scratch pad register */
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2621
/* gen2: clear pending doorbell state, then unmask the gen2 interrupt */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2628
/*
 * gen2 interrupt handler: returns 1 if the interrupt was ours (and
 * acknowledges it via the doorbell clear register), 0 otherwise.
 */
int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2643
/* gen2: post a command; same encoding as the ppc variant */
void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2650
/* skinny: firmware state lives in the outbound scratch pad register */
u_int32_t
mfi_skinny_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2656
/*
 * skinny: unmask interrupts.  NOTE(review): ~0x00000001 presumably
 * keeps only bit 0 unmasked -- magic value from the vendor driver.
 */
void
mfi_skinny_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~0x00000001);
}
2662
/*
 * skinny interrupt handler: returns 1 if the interrupt was ours (and
 * acknowledges it by writing the status back), 0 otherwise.
 */
int
mfi_skinny_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2677
/*
 * skinny: post a command through the 64-bit inbound queue port,
 * written as two 32-bit halves (high half is always zero here).
 */
void
mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	mfi_write(sc, MFI_IQPH, 0x00000000);
}
2685
/*
 * Build the scatter/gather list for a command on skinny adapters.
 * I/O commands use the 64-bit IEEE SGL format; everything else
 * falls back to the default SGL builder.  Returns the size in
 * bytes of the SGL written into the frame.
 */
u_int
mfi_skinny_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	union mfi_sgl *sgl = ccb->ccb_sgl;
	bus_dma_segment_t *sgd = ccb->ccb_dmamap->dm_segs;
	int i;

	switch (hdr->mfh_cmd) {
	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:
	case MFI_CMD_PD_SCSI_IO:
		/* Use MF_FRAME_IEEE for some IO commands on skinny adapters */
		for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
			sgl->sg_skinny[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg_skinny[i].len = htole32(sgd[i].ds_len);
			sgl->sg_skinny[i].flag = 0;
		}
		/* tell the firmware the frame carries 64-bit IEEE SGLs */
		hdr->mfh_flags |= MFI_FRAME_IEEE | MFI_FRAME_SGL64;

		return (ccb->ccb_dmamap->dm_nsegs * sizeof(sgl->sg_skinny));
	default:
		return (mfi_default_sgd_load(sc, ccb));
	}
}
2711
/*
 * Probe a physical disk target on the pass-through bus.  Only lun 0
 * is supported; the target must have a pd link and its firmware
 * state must be MFI_PD_SYSTEM (i.e. a JBOD/system disk, not one
 * owned by a RAID set).  Returns 0 when the target may attach.
 */
int
mfi_pd_scsi_probe(struct scsi_link *link)
{
	union mfi_mbox mbox;
	struct mfi_softc *sc = link->bus->sb_adapter_softc;
	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];

	if (link->lun > 0)
		return (0);

	if (pl == NULL)
		return (ENXIO);

	/* refresh pd_info for this device id */
	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->pd_id;

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof(pl->pd_info), &pl->pd_info, &mbox))
		return (EIO);

	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
		return (ENXIO);

	return (0);
}
2737
/*
 * Issue a SCSI command to a physical (pass-through) disk by wrapping
 * the CDB in an MFI pass frame.  Runs without the kernel lock except
 * around completion (scsi_done must be called locked).
 */
void
mfi_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfi_softc *sc = link->bus->sb_adapter_softc;
	struct mfi_ccb *ccb = xs->io;
	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];

	KERNEL_UNLOCK();

	mfi_scrub_ccb(ccb);
	xs->error = XS_NOERROR;

	/* fill in the pass-through frame header */
	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
	pf->mpf_header.mfh_target_id = pl->pd_id;
	pf->mpf_header.mfh_lun_id = link->lun;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
	pf->mpf_sense_addr = htole64(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
	memcpy(pf->mpf_cdb, &xs->cmd, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			goto stuffup;
	}

	/* polled commands complete synchronously */
	if (xs->flags & SCSI_POLL)
		mfi_poll(sc, ccb);
	else
		mfi_start(sc, ccb);

	KERNEL_LOCK();
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	KERNEL_LOCK();
	scsi_done(xs);
}
Cache object: 45490cd38bd58035576f55e81b00dd58
|