FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/mfi.c
1 /* $NetBSD: mfi.c,v 1.2.4.3 2007/10/15 05:09:59 riz Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.2.4.3 2007/10/15 05:09:59 riz Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <machine/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 int mfi_scsi_ioctl(struct scsipi_channel *, u_long, caddr_t, int,
67 struct proc *);
68 void mfiminphys(struct buf *bp);
69
70 struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
71 void mfi_put_ccb(struct mfi_ccb *);
72 int mfi_init_ccb(struct mfi_softc *);
73
74 struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
75 void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
76
77 int mfi_transition_firmware(struct mfi_softc *);
78 int mfi_initialize_firmware(struct mfi_softc *);
79 int mfi_get_info(struct mfi_softc *);
80 uint32_t mfi_read(struct mfi_softc *, bus_size_t);
81 void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
82 int mfi_poll(struct mfi_ccb *);
83 int mfi_despatch_cmd(struct mfi_ccb *);
84 int mfi_create_sgl(struct mfi_ccb *, int);
85
86 /* commands */
87 int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
88 int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *, uint32_t,
89 uint32_t);
90 void mfi_scsi_xs_done(struct mfi_ccb *);
91 int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
92 void *, uint8_t *);
93 void mfi_mgmt_done(struct mfi_ccb *);
94
95 #if NBIO > 0
96 int mfi_ioctl(struct device *, u_long, caddr_t);
97 int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
98 int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
99 int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
100 int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
101 int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
102 int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
103 int mfi_bio_hs(struct mfi_softc *, int, int, void *);
104 int mfi_create_sensors(struct mfi_softc *);
105 int mfi_sensor_gtredata(struct sysmon_envsys *,
106 struct envsys_tre_data *);
107 int mfi_sensor_streinfo(struct sysmon_envsys *,
108 struct envsys_basic_info *);
109 #endif /* NBIO > 0 */
110
111 struct mfi_ccb *
112 mfi_get_ccb(struct mfi_softc *sc)
113 {
114 struct mfi_ccb *ccb;
115 int s;
116
117 s = splbio();
118 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
119 if (ccb) {
120 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
121 ccb->ccb_state = MFI_CCB_READY;
122 }
123 splx(s);
124
125 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
126
127 return (ccb);
128 }
129
130 void
131 mfi_put_ccb(struct mfi_ccb *ccb)
132 {
133 struct mfi_softc *sc = ccb->ccb_sc;
134 int s;
135
136 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
137
138 s = splbio();
139 ccb->ccb_state = MFI_CCB_FREE;
140 ccb->ccb_xs = NULL;
141 ccb->ccb_flags = 0;
142 ccb->ccb_done = NULL;
143 ccb->ccb_direction = 0;
144 ccb->ccb_frame_size = 0;
145 ccb->ccb_extra_frames = 0;
146 ccb->ccb_sgl = NULL;
147 ccb->ccb_data = NULL;
148 ccb->ccb_len = 0;
149 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
150 splx(s);
151 }
152
153 int
154 mfi_init_ccb(struct mfi_softc *sc)
155 {
156 struct mfi_ccb *ccb;
157 uint32_t i;
158 int error;
159
160 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
161
162 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
163 M_DEVBUF, M_WAITOK);
164 memset(sc->sc_ccb, 0, sizeof(struct mfi_ccb) * sc->sc_max_cmds);
165
166 for (i = 0; i < sc->sc_max_cmds; i++) {
167 ccb = &sc->sc_ccb[i];
168
169 ccb->ccb_sc = sc;
170
171 /* select i'th frame */
172 ccb->ccb_frame = (union mfi_frame *)
173 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
174 ccb->ccb_pframe =
175 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
176 ccb->ccb_frame->mfr_header.mfh_context = i;
177
178 /* select i'th sense */
179 ccb->ccb_sense = (struct mfi_sense *)
180 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
181 ccb->ccb_psense =
182 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
183
184 /* create a dma map for transfer */
185 error = bus_dmamap_create(sc->sc_dmat,
186 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
187 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
188 if (error) {
189 printf("%s: cannot create ccb dmamap (%d)\n",
190 DEVNAME(sc), error);
191 goto destroy;
192 }
193
194 DNPRINTF(MFI_D_CCB,
195 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
196 ccb->ccb_frame->mfr_header.mfh_context, ccb,
197 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
198 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
199 (u_long)ccb->ccb_dmamap);
200
201 /* add ccb to queue */
202 mfi_put_ccb(ccb);
203 }
204
205 return (0);
206 destroy:
207 /* free dma maps and ccb memory */
208 while (i) {
209 ccb = &sc->sc_ccb[i];
210 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
211 i--;
212 }
213
214 free(sc->sc_ccb, M_DEVBUF);
215
216 return (1);
217 }
218
219 uint32_t
220 mfi_read(struct mfi_softc *sc, bus_size_t r)
221 {
222 uint32_t rv;
223
224 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
225 BUS_SPACE_BARRIER_READ);
226 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
227
228 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), (u_long)r, rv);
229 return (rv);
230 }
231
/*
 * Write a 32-bit value to controller register r, followed by a write
 * barrier so the store is posted before any subsequent bus-space access.
 */
void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
241
/*
 * Allocate a physically contiguous, DMA-mapped, zeroed memory region of
 * the given size for controller/driver shared structures.
 *
 * The standard bus_dma dance: create map -> alloc segment -> map into
 * KVA -> load map.  On any failure the steps already completed are
 * unwound in reverse order via the goto chain.  Returns the wrapper
 * struct mfi_mem, or NULL on failure.  Counterpart: mfi_freemem().
 */
struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem *mm;
	int nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT);
	if (mm == NULL)
		return (NULL);

	memset(mm, 0, sizeof(struct mfi_mem));
	mm->am_size = size;

	/* single segment: the region must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	/* hand back a zeroed region */
	memset(mm->am_kva, 0, size);
	return (mm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return (NULL);
}
291
/*
 * Release a region obtained from mfi_allocmem(): unload and unmap the
 * dmamap, free the DMA segment, destroy the map, then free the wrapper.
 * The order is the exact reverse of the allocation sequence.
 */
void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}
303
/*
 * Drive the controller firmware to the READY state.
 *
 * Reads the firmware state from the outbound message register and, for
 * each non-ready state, optionally pokes the inbound doorbell and then
 * polls (100 ms steps) for up to max_wait seconds for the state to
 * change.  Returns 0 once READY, 1 on fault, unknown state, or timeout.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* unrecoverable; give up immediately */
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* acknowledge the handshake to let fw proceed */
			mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask fw to transition to ready */
			mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			/* lengthy internal operations; be patient */
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms, max_wait seconds total */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
362
363 int
364 mfi_initialize_firmware(struct mfi_softc *sc)
365 {
366 struct mfi_ccb *ccb;
367 struct mfi_init_frame *init;
368 struct mfi_init_qinfo *qinfo;
369
370 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
371
372 if ((ccb = mfi_get_ccb(sc)) == NULL)
373 return (1);
374
375 init = &ccb->ccb_frame->mfr_init;
376 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
377
378 memset(qinfo, 0, sizeof *qinfo);
379 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
380 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
381 offsetof(struct mfi_prod_cons, mpc_reply_q));
382 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
383 offsetof(struct mfi_prod_cons, mpc_producer));
384 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
385 offsetof(struct mfi_prod_cons, mpc_consumer));
386
387 init->mif_header.mfh_cmd = MFI_CMD_INIT;
388 init->mif_header.mfh_data_len = sizeof *qinfo;
389 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
390
391 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
392 DEVNAME(sc),
393 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
394 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
395
396 if (mfi_poll(ccb)) {
397 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
398 return (1);
399 }
400
401 mfi_put_ccb(ccb);
402
403 return (0);
404 }
405
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc->sc_info via a synchronous management command.  With
 * MFI_DEBUG compiled in, dump the interesting fields.
 *
 * Returns 0 on success, 1 if the management command failed.
 */
int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL))
		return (1);

#ifdef MFI_DEBUG
	/* everything below is a straight dump of sc_info for debugging */

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return (0);
}
564
565 void
566 mfiminphys(struct buf *bp)
567 {
568 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
569
570 /* XXX currently using MFI_MAXFER = MAXPHYS */
571 if (bp->b_bcount > MFI_MAXFER)
572 bp->b_bcount = MFI_MAXFER;
573 minphys(bp);
574 }
575
576 int
577 mfi_attach(struct mfi_softc *sc)
578 {
579 struct scsipi_adapter *adapt = &sc->sc_adapt;
580 struct scsipi_channel *chan = &sc->sc_chan;
581 uint32_t status, frames;
582 int i;
583
584 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
585
586 if (mfi_transition_firmware(sc))
587 return (1);
588
589 TAILQ_INIT(&sc->sc_ccb_freeq);
590
591 status = mfi_read(sc, MFI_OMSG0);
592 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
593 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
594 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
595 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
596
597 /* consumer/producer and reply queue memory */
598 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
599 sizeof(struct mfi_prod_cons));
600 if (sc->sc_pcq == NULL) {
601 aprint_error("%s: unable to allocate reply queue memory\n",
602 DEVNAME(sc));
603 goto nopcq;
604 }
605 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
606 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
608
609 /* frame memory */
610 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
611 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
612 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
613 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
614 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
615 if (sc->sc_frames == NULL) {
616 aprint_error("%s: unable to allocate frame memory\n",
617 DEVNAME(sc));
618 goto noframe;
619 }
620 /* XXX hack, fix this */
621 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
622 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
623 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
624 goto noframe;
625 }
626
627 /* sense memory */
628 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
629 if (sc->sc_sense == NULL) {
630 aprint_error("%s: unable to allocate sense memory\n",
631 DEVNAME(sc));
632 goto nosense;
633 }
634
635 /* now that we have all memory bits go initialize ccbs */
636 if (mfi_init_ccb(sc)) {
637 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
638 goto noinit;
639 }
640
641 /* kickstart firmware with all addresses and pointers */
642 if (mfi_initialize_firmware(sc)) {
643 aprint_error("%s: could not initialize firmware\n",
644 DEVNAME(sc));
645 goto noinit;
646 }
647
648 if (mfi_get_info(sc)) {
649 aprint_error("%s: could not retrieve controller information\n",
650 DEVNAME(sc));
651 goto noinit;
652 }
653
654 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
655 DEVNAME(sc),
656 sc->sc_info.mci_lds_present,
657 sc->sc_info.mci_package_version,
658 sc->sc_info.mci_memory_size);
659
660 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
661 sc->sc_max_ld = sc->sc_ld_cnt;
662 for (i = 0; i < sc->sc_ld_cnt; i++)
663 sc->sc_ld[i].ld_present = 1;
664
665 memset(adapt, 0, sizeof(*adapt));
666 adapt->adapt_dev = &sc->sc_dev;
667 adapt->adapt_nchannels = 1;
668 if (sc->sc_ld_cnt)
669 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
670 else
671 adapt->adapt_openings = sc->sc_max_cmds;
672 adapt->adapt_max_periph = adapt->adapt_openings;
673 adapt->adapt_request = mfi_scsipi_request;
674 adapt->adapt_minphys = mfiminphys;
675 adapt->adapt_ioctl = mfi_scsi_ioctl;
676
677 memset(chan, 0, sizeof(*chan));
678 chan->chan_adapter = adapt;
679 chan->chan_bustype = &scsi_bustype;
680 chan->chan_channel = 0;
681 chan->chan_flags = 0;
682 chan->chan_nluns = 8;
683 chan->chan_ntargets = MFI_MAX_LD;
684 chan->chan_id = MFI_MAX_LD;
685
686 (void) config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);
687
688 /* enable interrupts */
689 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
690
691 #if NBIO > 0
692 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
693 panic("%s: controller registration failed", DEVNAME(sc));
694 else
695 sc->sc_ioctl = mfi_ioctl;
696 if (mfi_create_sensors(sc) != 0)
697 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
698 #endif /* NBIO > 0 */
699
700 return (0);
701 noinit:
702 mfi_freemem(sc, sc->sc_sense);
703 nosense:
704 mfi_freemem(sc, sc->sc_frames);
705 noframe:
706 mfi_freemem(sc, sc->sc_pcq);
707 nopcq:
708 return (1);
709 }
710
/*
 * Hand a prepared ccb to the firmware: sync its frame (device will read
 * it) and sense buffer (device will write it), then ring the inbound
 * queue port with the frame's physical address.  The low 3 bits of the
 * doorbell value carry the extra-frame count; the address itself must
 * therefore be 8-byte aligned (hence the >> 3).
 */
int
mfi_despatch_cmd(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	DNPRINTF(MFI_D_CMD, "%s: mfi_despatch_cmd\n", DEVNAME(sc));

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

	mfi_write(ccb->ccb_sc, MFI_IQP, htole32((ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames));

	return(0);
}
729
/*
 * Issue a ccb and busy-wait for its completion (used before interrupts
 * are enabled and for polled requests).
 *
 * The frame's status byte is primed to 0xff and the frame flagged so
 * the firmware completes it in place rather than via the reply queue;
 * we then poll the status byte (re-syncing the frame DMA each pass) for
 * up to ~5 seconds.  Returns 0 on completion, 1 on timeout (with
 * MFI_CCB_F_ERR set).  Any data dmamap attached to the ccb is synced
 * and unloaded on the way out.
 */
int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	/* 0xff == still owned by firmware; overwritten on completion */
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_despatch_cmd(ccb);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

	while (hdr->mfh_cmd_status == 0xff) {
		delay(1000);
		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
			break;
		/* re-sync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status == 0xff) {
		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return (1);
	}

	return (0);
}
780
/*
 * Interrupt handler.  Acknowledge the controller, then walk the shared
 * reply queue from the consumer index to the producer index, completing
 * each posted ccb via its ccb_done callback.  Consumed slots are
 * poisoned with MFI_INVALID_CTX so stale entries are detectable.  The
 * queue has sc_max_cmds + 1 entries and wraps accordingly.
 *
 * Returns nonzero iff the interrupt was ours and work was done.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_prod_cons *pcq;
	struct mfi_ccb *ccb;
	uint32_t status, producer, consumer, ctx;
	int claimed = 0;

	status = mfi_read(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return (claimed);
	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	/* pull the device's view of producer/consumer into the CPU */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			/* make the completed frame visible before done() */
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		/* queue holds sc_max_cmds + 1 entries; wrap */
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* publish our new consumer index back to the device */
	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (claimed);
}
842
843 int
844 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
845 uint32_t blockcnt)
846 {
847 struct scsipi_periph *periph = xs->xs_periph;
848 struct mfi_io_frame *io;
849
850 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
851 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
852 periph->periph_target);
853
854 if (!xs->data)
855 return (1);
856
857 io = &ccb->ccb_frame->mfr_io;
858 if (xs->xs_control & XS_CTL_DATA_IN) {
859 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
860 ccb->ccb_direction = MFI_DATA_IN;
861 } else {
862 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
863 ccb->ccb_direction = MFI_DATA_OUT;
864 }
865 io->mif_header.mfh_target_id = periph->periph_target;
866 io->mif_header.mfh_timeout = 0;
867 io->mif_header.mfh_flags = 0;
868 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
869 io->mif_header.mfh_data_len= blockcnt;
870 io->mif_lba_hi = 0;
871 io->mif_lba_lo = blockno;
872 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
873 io->mif_sense_addr_hi = 0;
874
875 ccb->ccb_done = mfi_scsi_xs_done;
876 ccb->ccb_xs = xs;
877 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
878 ccb->ccb_sgl = &io->mif_sgl;
879 ccb->ccb_data = xs->data;
880 ccb->ccb_len = xs->datalen;
881
882 if (mfi_create_sgl(ccb, xs->xs_control & XS_CTL_NOSLEEP) ?
883 BUS_DMA_NOWAIT : BUS_DMA_WAITOK)
884 return (1);
885
886 return (0);
887 }
888
/*
 * Completion callback for scsipi commands (LD I/O and passthrough).
 * Syncs and unloads the data dmamap, translates the firmware status
 * into scsipi terms — copying the sense buffer on a SCSI-level error —
 * then recycles the ccb and finishes the xfer.
 */
void
mfi_scsi_xs_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), hdr->mfh_cmd_status);

		/* a SCSI status means valid sense data was written */
		if (hdr->mfh_scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), hdr->mfh_scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}
937
938 int
939 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
940 {
941 struct mfi_pass_frame *pf;
942 struct scsipi_periph *periph = xs->xs_periph;
943
944 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
945 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
946 periph->periph_target);
947
948 pf = &ccb->ccb_frame->mfr_pass;
949 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
950 pf->mpf_header.mfh_target_id = periph->periph_target;
951 pf->mpf_header.mfh_lun_id = 0;
952 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
953 pf->mpf_header.mfh_timeout = 0;
954 pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */
955 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
956
957 pf->mpf_sense_addr_hi = 0;
958 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
959
960 memset(pf->mpf_cdb, 0, 16);
961 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
962
963 ccb->ccb_done = mfi_scsi_xs_done;
964 ccb->ccb_xs = xs;
965 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
966 ccb->ccb_sgl = &pf->mpf_sgl;
967
968 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
969 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
970 MFI_DATA_IN : MFI_DATA_OUT;
971 else
972 ccb->ccb_direction = MFI_DATA_NONE;
973
974 if (xs->data) {
975 ccb->ccb_data = xs->data;
976 ccb->ccb_len = xs->datalen;
977
978 if (mfi_create_sgl(ccb, xs->xs_control & XS_CTL_NOSLEEP) ?
979 BUS_DMA_NOWAIT : BUS_DMA_WAITOK)
980 return (1);
981 }
982
983 return (0);
984 }
985
986 void
987 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
988 void *arg)
989 {
990 struct scsipi_periph *periph;
991 struct scsipi_xfer *xs;
992 struct scsipi_adapter *adapt = chan->chan_adapter;
993 struct mfi_softc *sc = (void *) adapt->adapt_dev;
994 struct mfi_ccb *ccb;
995 struct scsi_rw_6 *rw;
996 struct scsipi_rw_10 *rwb;
997 uint32_t blockno, blockcnt;
998 uint8_t target;
999 uint8_t mbox[MFI_MBOX_SIZE];
1000 int s;
1001
1002 switch (req) {
1003 case ADAPTER_REQ_GROW_RESOURCES:
1004 /* Not supported. */
1005 return;
1006 case ADAPTER_REQ_SET_XFER_MODE:
1007 /* Not supported. */
1008 return;
1009 case ADAPTER_REQ_RUN_XFER:
1010 break;
1011 }
1012
1013 xs = arg;
1014
1015 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1016 DEVNAME(sc), req, xs->cmd->opcode);
1017
1018 periph = xs->xs_periph;
1019 target = periph->periph_target;
1020
1021 s = splbio();
1022 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1023 periph->periph_lun != 0) {
1024 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1025 DEVNAME(sc), target);
1026 xs->error = XS_SELTIMEOUT;
1027 scsipi_done(xs);
1028 splx(s);
1029 return;
1030 }
1031
1032 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1033 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1034 xs->error = XS_RESOURCE_SHORTAGE;
1035 scsipi_done(xs);
1036 splx(s);
1037 return;
1038 }
1039
1040 switch (xs->cmd->opcode) {
1041 /* IO path */
1042 case READ_10:
1043 case WRITE_10:
1044 rwb = (struct scsipi_rw_10 *)xs->cmd;
1045 blockno = _4btol(rwb->addr);
1046 blockcnt = _2btol(rwb->length);
1047 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1048 mfi_put_ccb(ccb);
1049 goto stuffup;
1050 }
1051 break;
1052
1053 case SCSI_READ_6_COMMAND:
1054 case SCSI_WRITE_6_COMMAND:
1055 rw = (struct scsi_rw_6 *)xs->cmd;
1056 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1057 blockcnt = rw->length ? rw->length : 0x100;
1058 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1059 mfi_put_ccb(ccb);
1060 goto stuffup;
1061 }
1062 break;
1063
1064 case SCSI_SYNCHRONIZE_CACHE_10:
1065 mfi_put_ccb(ccb); /* we don't need this */
1066
1067 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1068 if (mfi_mgmt(sc, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE,
1069 0, NULL, mbox))
1070 goto stuffup;
1071 xs->error = XS_NOERROR;
1072 xs->status = SCSI_OK;
1073 xs->resid = 0;
1074 scsipi_done(xs);
1075 splx(s);
1076 return;
1077 /* NOTREACHED */
1078
1079 /* hand it of to the firmware and let it deal with it */
1080 case SCSI_TEST_UNIT_READY:
1081 /* save off sd? after autoconf */
1082 if (!cold) /* XXX bogus */
1083 strlcpy(sc->sc_ld[target].ld_dev, sc->sc_dev.dv_xname,
1084 sizeof(sc->sc_ld[target].ld_dev));
1085 /* FALLTHROUGH */
1086
1087 default:
1088 if (mfi_scsi_ld(ccb, xs)) {
1089 mfi_put_ccb(ccb);
1090 goto stuffup;
1091 }
1092 break;
1093 }
1094
1095 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1096
1097 if (xs->xs_control & XS_CTL_POLL) {
1098 if (mfi_poll(ccb)) {
1099 /* XXX check for sense in ccb->ccb_sense? */
1100 printf("%s: mfi_scsipi_request poll failed\n",
1101 DEVNAME(sc));
1102 mfi_put_ccb(ccb);
1103 bzero(&xs->sense, sizeof(xs->sense));
1104 xs->sense.scsi_sense.response_code =
1105 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1106 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1107 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1108 xs->error = XS_SENSE;
1109 xs->status = SCSI_CHECK;
1110 } else {
1111 DNPRINTF(MFI_D_DMA,
1112 "%s: mfi_scsipi_request poll complete %d\n",
1113 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1114 xs->error = XS_NOERROR;
1115 xs->status = SCSI_OK;
1116 xs->resid = 0;
1117 }
1118 mfi_put_ccb(ccb);
1119 scsipi_done(xs);
1120 splx(s);
1121 return;
1122 }
1123
1124 mfi_despatch_cmd(ccb);
1125
1126 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1127 ccb->ccb_dmamap->dm_nsegs);
1128
1129 splx(s);
1130 return;
1131
1132 stuffup:
1133 xs->error = XS_DRIVER_STUFFUP;
1134 scsipi_done(xs);
1135 splx(s);
1136 }
1137
1138 int
1139 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1140 {
1141 struct mfi_softc *sc = ccb->ccb_sc;
1142 struct mfi_frame_header *hdr;
1143 bus_dma_segment_t *sgd;
1144 union mfi_sgl *sgl;
1145 int error, i;
1146
1147 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1148 (u_long)ccb->ccb_data);
1149
1150 if (!ccb->ccb_data)
1151 return (1);
1152
1153 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1154 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1155 if (error) {
1156 if (error == EFBIG)
1157 printf("more than %d dma segs\n",
1158 sc->sc_max_sgl);
1159 else
1160 printf("error %d loading dma map\n", error);
1161 return (1);
1162 }
1163
1164 hdr = &ccb->ccb_frame->mfr_header;
1165 sgl = ccb->ccb_sgl;
1166 sgd = ccb->ccb_dmamap->dm_segs;
1167 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1168 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1169 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1170 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1171 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1172 }
1173
1174 if (ccb->ccb_direction == MFI_DATA_IN) {
1175 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1176 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1177 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1178 } else {
1179 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1180 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1181 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1182 }
1183
1184 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1185 /* for 64 bit io make the sizeof a variable to hold whatever sg size */
1186 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1187 ccb->ccb_dmamap->dm_nsegs;
1188 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1189
1190 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1191 " dm_nsegs: %d extra_frames: %d\n",
1192 DEVNAME(sc),
1193 hdr->mfh_sg_count,
1194 ccb->ccb_frame_size,
1195 sc->sc_frames_size,
1196 ccb->ccb_dmamap->dm_nsegs,
1197 ccb->ccb_extra_frames);
1198
1199 return (0);
1200 }
1201
1202 int
1203 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1204 void *buf, uint8_t *mbox)
1205 {
1206 struct mfi_ccb *ccb;
1207 struct mfi_dcmd_frame *dcmd;
1208 int rv = 1;
1209
1210 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(sc), opc);
1211
1212 if ((ccb = mfi_get_ccb(sc)) == NULL)
1213 return (rv);
1214
1215 dcmd = &ccb->ccb_frame->mfr_dcmd;
1216 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1217 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1218 dcmd->mdf_header.mfh_timeout = 0;
1219
1220 dcmd->mdf_opcode = opc;
1221 dcmd->mdf_header.mfh_data_len = 0;
1222 ccb->ccb_direction = dir;
1223 ccb->ccb_done = mfi_mgmt_done;
1224
1225 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1226
1227 /* handle special opcodes */
1228 if (mbox)
1229 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1230
1231 if (dir != MFI_DATA_NONE) {
1232 dcmd->mdf_header.mfh_data_len = len;
1233 ccb->ccb_data = buf;
1234 ccb->ccb_len = len;
1235 ccb->ccb_sgl = &dcmd->mdf_sgl;
1236
1237 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1238 goto done;
1239 }
1240
1241 if (cold) {
1242 if (mfi_poll(ccb))
1243 goto done;
1244 } else {
1245 mfi_despatch_cmd(ccb);
1246
1247 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt sleeping\n", DEVNAME(sc));
1248 while (ccb->ccb_state != MFI_CCB_DONE)
1249 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1250
1251 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1252 goto done;
1253 }
1254
1255 rv = 0;
1256
1257 done:
1258 mfi_put_ccb(ccb);
1259 return (rv);
1260 }
1261
1262 void
1263 mfi_mgmt_done(struct mfi_ccb *ccb)
1264 {
1265 struct mfi_softc *sc = ccb->ccb_sc;
1266 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1267
1268 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1269 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1270
1271 if (ccb->ccb_data != NULL) {
1272 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1273 DEVNAME(sc));
1274 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1275 ccb->ccb_dmamap->dm_mapsize,
1276 (ccb->ccb_direction & MFI_DATA_IN) ?
1277 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1278
1279 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1280 }
1281
1282 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1283 ccb->ccb_flags |= MFI_CCB_F_ERR;
1284
1285 ccb->ccb_state = MFI_CCB_DONE;
1286
1287 wakeup(ccb);
1288 }
1289
1290
/*
 * scsipi channel ioctl hook.  No channel-level ioctls are implemented;
 * everything interesting goes through the bio(4) path below.
 */
int
mfi_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, caddr_t arg,
    int flag, struct proc *p)
{
	return (ENOTTY);
}
1297
1298 #if NBIO > 0
1299 int
1300 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1301 {
1302 struct mfi_softc *sc = (struct mfi_softc *)dev;
1303 int error = 0;
1304
1305 int s = splbio();
1306 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1307
1308 switch (cmd) {
1309 case BIOCINQ:
1310 DNPRINTF(MFI_D_IOCTL, "inq\n");
1311 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1312 break;
1313
1314 case BIOCVOL:
1315 DNPRINTF(MFI_D_IOCTL, "vol\n");
1316 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1317 break;
1318
1319 case BIOCDISK:
1320 DNPRINTF(MFI_D_IOCTL, "disk\n");
1321 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1322 break;
1323
1324 case BIOCALARM:
1325 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1326 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1327 break;
1328
1329 case BIOCBLINK:
1330 DNPRINTF(MFI_D_IOCTL, "blink\n");
1331 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1332 break;
1333
1334 case BIOCSETSTATE:
1335 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1336 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1337 break;
1338
1339 default:
1340 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1341 error = EINVAL;
1342 }
1343 splx(s);
1344 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1345 return (error);
1346 }
1347
1348 int
1349 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1350 {
1351 struct mfi_conf *cfg;
1352 int rv = EINVAL;
1353
1354 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1355
1356 if (mfi_get_info(sc)) {
1357 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1358 DEVNAME(sc));
1359 return (EIO);
1360 }
1361
1362 /* get figures */
1363 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1364 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1365 goto freeme;
1366
1367 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1368 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1369 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1370
1371 rv = 0;
1372 freeme:
1373 free(cfg, M_DEVBUF);
1374 return (rv);
1375 }
1376
1377 int
1378 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1379 {
1380 int i, per, rv = EINVAL;
1381 uint8_t mbox[MFI_MBOX_SIZE];
1382
1383 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1384 DEVNAME(sc), bv->bv_volid);
1385
1386 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1387 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1388 goto done;
1389
1390 i = bv->bv_volid;
1391 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1392 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1393 DEVNAME(sc), mbox[0]);
1394
1395 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1396 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1397 goto done;
1398
1399 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1400 /* go do hotspares */
1401 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1402 goto done;
1403 }
1404
1405 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1406
1407 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1408 case MFI_LD_OFFLINE:
1409 bv->bv_status = BIOC_SVOFFLINE;
1410 break;
1411
1412 case MFI_LD_PART_DEGRADED:
1413 case MFI_LD_DEGRADED:
1414 bv->bv_status = BIOC_SVDEGRADED;
1415 break;
1416
1417 case MFI_LD_ONLINE:
1418 bv->bv_status = BIOC_SVONLINE;
1419 break;
1420
1421 default:
1422 bv->bv_status = BIOC_SVINVALID;
1423 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1424 DEVNAME(sc),
1425 sc->sc_ld_list.mll_list[i].mll_state);
1426 }
1427
1428 /* additional status can modify MFI status */
1429 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1430 case MFI_LD_PROG_CC:
1431 case MFI_LD_PROG_BGI:
1432 bv->bv_status = BIOC_SVSCRUB;
1433 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1434 bv->bv_percent = (per * 100) / 0xffff;
1435 bv->bv_seconds =
1436 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1437 break;
1438
1439 case MFI_LD_PROG_FGI:
1440 case MFI_LD_PROG_RECONSTRUCT:
1441 /* nothing yet */
1442 break;
1443 }
1444
1445 /*
1446 * The RAID levels are determined per the SNIA DDF spec, this is only
1447 * a subset that is valid for the MFI contrller.
1448 */
1449 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1450 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1451 MFI_DDF_SRL_SPANNED)
1452 bv->bv_level *= 10;
1453
1454 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1455 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1456
1457 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1458
1459 rv = 0;
1460 done:
1461 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1462 DEVNAME(sc), rv);
1463 return (rv);
1464 }
1465
/*
 * BIOCDISK handler: locate the physical disk addressed by
 * (bd->bd_volid, bd->bd_diskid) inside the firmware configuration and
 * fill in its state, size, channel (enclosure index) and vendor string.
 *
 * The firmware config is variable-sized: a header, then mfc_no_array
 * array records of mfc_array_size bytes each, then the LD records.  The
 * LD pointer below is computed by walking past the array region, so the
 * byte offsets must match the firmware layout exactly.
 */
int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1];	/* vendor + product + revision + NUL */
	int i, rv = EINVAL;
	int arr, vol, disk;
	uint32_t size;
	uint8_t mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK);
	memset(cfg, 0, size);
	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
		goto freeme;

	ar = cfg->mfc_array;

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	vol = bd->bd_volid;

	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* find corresponding array for ld */
	for (i = 0, arr = 0; i < vol; i++)
		arr += ld[i].mlc_parm.mpa_span_depth;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* offset array index into the next spans */
	arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* map the firmware PD state onto the bio(4) disk status codes */
	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
	switch (ar[arr].pd[disk].mar_pd_state){
	case MFI_PD_UNCONFIG_GOOD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;

	}

	/* get the remaining fields */
	/* PD id goes in the first two mailbox bytes (host byte order —
	 * NOTE(review): presumably the firmware expects little-endian;
	 * confirm on big-endian machines) */
	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
	memset(pd, 0, sizeof(*pd));
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox))
		goto freeme;

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	/* copy the INQUIRY vendor/product/revision as one printable string */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	memcpy(vend, inqbuf->vendor, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return (rv);
}
1584
1585 int
1586 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1587 {
1588 uint32_t opc, dir = MFI_DATA_NONE;
1589 int rv = 0;
1590 int8_t ret;
1591
1592 switch(ba->ba_opcode) {
1593 case BIOC_SADISABLE:
1594 opc = MR_DCMD_SPEAKER_DISABLE;
1595 break;
1596
1597 case BIOC_SAENABLE:
1598 opc = MR_DCMD_SPEAKER_ENABLE;
1599 break;
1600
1601 case BIOC_SASILENCE:
1602 opc = MR_DCMD_SPEAKER_SILENCE;
1603 break;
1604
1605 case BIOC_GASTATUS:
1606 opc = MR_DCMD_SPEAKER_GET;
1607 dir = MFI_DATA_IN;
1608 break;
1609
1610 case BIOC_SATEST:
1611 opc = MR_DCMD_SPEAKER_TEST;
1612 break;
1613
1614 default:
1615 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1616 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1617 return (EINVAL);
1618 }
1619
1620 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1621 rv = EINVAL;
1622 else
1623 if (ba->ba_opcode == BIOC_GASTATUS)
1624 ba->ba_status = ret;
1625 else
1626 ba->ba_status = 0;
1627
1628 return (rv);
1629 }
1630
1631 int
1632 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1633 {
1634 int i, found, rv = EINVAL;
1635 uint8_t mbox[MFI_MBOX_SIZE];
1636 uint32_t cmd;
1637 struct mfi_pd_list *pd;
1638
1639 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1640 bb->bb_status);
1641
1642 /* channel 0 means not in an enclosure so can't be blinked */
1643 if (bb->bb_channel == 0)
1644 return (EINVAL);
1645
1646 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1647
1648 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1649 MFI_PD_LIST_SIZE, pd, NULL))
1650 goto done;
1651
1652 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1653 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1654 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1655 found = 1;
1656 break;
1657 }
1658
1659 if (!found)
1660 goto done;
1661
1662 memset(mbox, 0, sizeof mbox);
1663
1664 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;;
1665
1666 switch (bb->bb_status) {
1667 case BIOC_SBUNBLINK:
1668 cmd = MR_DCMD_PD_UNBLINK;
1669 break;
1670
1671 case BIOC_SBBLINK:
1672 cmd = MR_DCMD_PD_BLINK;
1673 break;
1674
1675 case BIOC_SBALARM:
1676 default:
1677 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1678 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1679 goto done;
1680 }
1681
1682
1683 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1684 goto done;
1685
1686 rv = 0;
1687 done:
1688 free(pd, M_DEVBUF);
1689 return (rv);
1690 }
1691
1692 int
1693 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1694 {
1695 struct mfi_pd_list *pd;
1696 int i, found, rv = EINVAL;
1697 uint8_t mbox[MFI_MBOX_SIZE];
1698 uint32_t cmd;
1699
1700 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1701 bs->bs_status);
1702
1703 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1704
1705 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1706 MFI_PD_LIST_SIZE, pd, NULL))
1707 goto done;
1708
1709 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1710 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1711 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1712 found = 1;
1713 break;
1714 }
1715
1716 if (!found)
1717 goto done;
1718
1719 memset(mbox, 0, sizeof mbox);
1720
1721 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;;
1722
1723 switch (bs->bs_status) {
1724 case BIOC_SSONLINE:
1725 mbox[2] = MFI_PD_ONLINE;
1726 cmd = MD_DCMD_PD_SET_STATE;
1727 break;
1728
1729 case BIOC_SSOFFLINE:
1730 mbox[2] = MFI_PD_OFFLINE;
1731 cmd = MD_DCMD_PD_SET_STATE;
1732 break;
1733
1734 case BIOC_SSHOTSPARE:
1735 mbox[2] = MFI_PD_HOTSPARE;
1736 cmd = MD_DCMD_PD_SET_STATE;
1737 break;
1738 /*
1739 case BIOC_SSREBUILD:
1740 cmd = MD_DCMD_PD_REBUILD;
1741 break;
1742 */
1743 default:
1744 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1745 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1746 goto done;
1747 }
1748
1749
1750 if (mfi_mgmt(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, mbox))
1751 goto done;
1752
1753 rv = 0;
1754 done:
1755 free(pd, M_DEVBUF);
1756 return (rv);
1757 }
1758
1759 int
1760 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1761 {
1762 struct mfi_conf *cfg;
1763 struct mfi_hotspare *hs;
1764 struct mfi_pd_details *pd;
1765 struct bioc_disk *sdhs;
1766 struct bioc_vol *vdhs;
1767 struct scsipi_inquiry_data *inqbuf;
1768 char vend[8+16+4+1];
1769 int i, rv = EINVAL;
1770 uint32_t size;
1771 uint8_t mbox[MFI_MBOX_SIZE];
1772
1773 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1774
1775 if (!bio_hs)
1776 return (EINVAL);
1777
1778 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1779
1780 /* send single element command to retrieve size for full structure */
1781 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1782 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1783 goto freeme;
1784
1785 size = cfg->mfc_size;
1786 free(cfg, M_DEVBUF);
1787
1788 /* memory for read config */
1789 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1790 memset(cfg, 0, size);
1791 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1792 goto freeme;
1793
1794 /* calculate offset to hs structure */
1795 hs = (struct mfi_hotspare *)(
1796 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1797 cfg->mfc_array_size * cfg->mfc_no_array +
1798 cfg->mfc_ld_size * cfg->mfc_no_ld);
1799
1800 if (volid < cfg->mfc_no_ld)
1801 goto freeme; /* not a hotspare */
1802
1803 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
1804 goto freeme; /* not a hotspare */
1805
1806 /* offset into hotspare structure */
1807 i = volid - cfg->mfc_no_ld;
1808
1809 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1810 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1811 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1812
1813 /* get pd fields */
1814 memset(mbox, 0, sizeof mbox);
1815 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1816 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1817 sizeof *pd, pd, mbox)) {
1818 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1819 DEVNAME(sc));
1820 goto freeme;
1821 }
1822
1823 switch (type) {
1824 case MFI_MGMT_VD:
1825 vdhs = bio_hs;
1826 vdhs->bv_status = BIOC_SVONLINE;
1827 vdhs->bv_size = pd->mpd_size / 2; /* XXX why? / 2 */
1828 vdhs->bv_level = -1; /* hotspare */
1829 vdhs->bv_nodisk = 1;
1830 break;
1831
1832 case MFI_MGMT_SD:
1833 sdhs = bio_hs;
1834 sdhs->bd_status = BIOC_SDHOTSPARE;
1835 sdhs->bd_size = pd->mpd_size / 2; /* XXX why? / 2 */
1836 sdhs->bd_channel = pd->mpd_enc_idx;
1837 sdhs->bd_target = pd->mpd_enc_slot;
1838 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1839 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1840 vend[sizeof vend - 1] = '\0';
1841 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1842 break;
1843
1844 default:
1845 goto freeme;
1846 }
1847
1848 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1849 rv = 0;
1850 freeme:
1851 free(pd, M_DEVBUF);
1852 free(cfg, M_DEVBUF);
1853
1854 return (rv);
1855 }
1856
1857 int
1858 mfi_create_sensors(struct mfi_softc *sc)
1859 {
1860 int i;
1861 struct envsys_range env_ranges[2];
1862 int nsensors = sc->sc_ld_cnt;
1863
1864 env_ranges[0].low = 0;
1865 env_ranges[0].high = nsensors;
1866 env_ranges[0].units = ENVSYS_DRIVE;
1867 env_ranges[1].low = 1;
1868 env_ranges[1].high = 0;
1869 env_ranges[1].units = 0;
1870
1871 sc->sc_sensor_data =
1872 malloc(sizeof(struct envsys_tre_data) * nsensors,
1873 M_DEVBUF, M_NOWAIT | M_ZERO);
1874 if (sc->sc_sensor_data == NULL) {
1875 aprint_error("%s: can't allocate envsys_tre_data\n",
1876 DEVNAME(sc));
1877 return(ENOMEM);
1878 }
1879 sc->sc_sensor_info =
1880 malloc(sizeof(struct envsys_basic_info) * nsensors,
1881 M_DEVBUF, M_NOWAIT | M_ZERO);
1882 if (sc->sc_sensor_info == NULL) {
1883 aprint_error("%s: can't allocate envsys_basic_info\n",
1884 DEVNAME(sc));
1885 return(ENOMEM);
1886 }
1887 for (i = 0; i < nsensors; i++) {
1888 sc->sc_sensor_data[i].sensor = i;
1889 sc->sc_sensor_data[i].units = ENVSYS_DRIVE;
1890 sc->sc_sensor_data[i].validflags = ENVSYS_FVALID;
1891 sc->sc_sensor_data[i].warnflags = ENVSYS_WARN_OK;
1892 sc->sc_sensor_info[i].sensor = i;
1893 sc->sc_sensor_info[i].units = ENVSYS_DRIVE;
1894 sc->sc_sensor_info[i].validflags = ENVSYS_FVALID;
1895 /* logical drives */
1896 snprintf(sc->sc_sensor_info[i].desc,
1897 sizeof(sc->sc_sensor_info[i].desc), "%s:%d",
1898 DEVNAME(sc), i);
1899 }
1900 sc->sc_ranges = env_ranges;
1901 sc->sc_envsys.sme_cookie = sc;
1902 sc->sc_envsys.sme_gtredata = mfi_sensor_gtredata;
1903 sc->sc_envsys.sme_streinfo = mfi_sensor_streinfo;
1904 sc->sc_envsys.sme_nsensors = sc->sc_ld_cnt;
1905 sc->sc_envsys.sme_envsys_version = 1000;
1906 if (sysmon_envsys_register(&sc->sc_envsys)) {
1907 printf("%s: unable to register with sysmon\n", DEVNAME(sc));
1908 return(1);
1909 }
1910 return (0);
1911 }
1912
1913 int
1914 mfi_sensor_gtredata(struct sysmon_envsys *sme, struct envsys_tre_data *tred)
1915 {
1916 struct mfi_softc *sc = sme->sme_cookie;
1917 struct bioc_vol bv;
1918 int s;
1919
1920 if (tred->sensor >= sc->sc_ld_cnt || tred->sensor < 0)
1921 return EINVAL;
1922
1923 bzero(&bv, sizeof(bv));
1924 bv.bv_volid = tred->sensor;
1925 s = splbio();
1926 if (mfi_ioctl_vol(sc, &bv)) {
1927 splx(s);
1928 return EIO;
1929 }
1930 splx(s);
1931
1932 switch(bv.bv_status) {
1933 case BIOC_SVOFFLINE:
1934 tred->cur.data_us = ENVSYS_DRIVE_FAIL;
1935 tred->warnflags = ENVSYS_WARN_CRITOVER;
1936 break;
1937
1938 case BIOC_SVDEGRADED:
1939 tred->cur.data_us = ENVSYS_DRIVE_PFAIL;
1940 tred->warnflags = ENVSYS_WARN_OVER;
1941 break;
1942
1943 case BIOC_SVSCRUB:
1944 case BIOC_SVONLINE:
1945 tred->cur.data_us = ENVSYS_DRIVE_ONLINE;
1946 tred->warnflags = ENVSYS_WARN_OK;
1947 break;
1948
1949 case BIOC_SVINVALID:
1950 /* FALLTRHOUGH */
1951 default:
1952 tred->cur.data_us = 0; /* unknown */
1953 tred->warnflags = ENVSYS_WARN_CRITOVER;
1954 }
1955 tred->validflags = ENVSYS_FVALID | ENVSYS_FCURVALID;
1956 tred->units = ENVSYS_DRIVE;
1957 return 0;
1958 }
1959
/*
 * sysmon envsys "set info" callback: sensor properties are read-only,
 * so reject any update by clearing validflags.
 */
int
mfi_sensor_streinfo(struct sysmon_envsys *sme, struct envsys_basic_info *binfo)
{
	binfo->validflags = 0;
	return 0;
}
1966 #endif /* NBIO > 0 */
Cache object: a3fc3527af4687c38830a0bd5affeed4
|