FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/mpi.c
1 /* $OpenBSD: mpi.c,v 1.224 2022/04/16 19:19:59 naddy Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/mutex.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/task.h>
33
34 #include <machine/bus.h>
35
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsiconf.h>
38
39 #include <dev/biovar.h>
40 #include <dev/ic/mpireg.h>
41 #include <dev/ic/mpivar.h>
42
#ifdef MPI_DEBUG
/*
 * Runtime debug mask consumed by DNPRINTF(); OR in the categories
 * below (uncomment the relevant lines) to enable their output.
 */
uint32_t mpi_debug = 0
/* | MPI_D_CMD */
/* | MPI_D_INTR */
/* | MPI_D_MISC */
/* | MPI_D_DMA */
/* | MPI_D_IOCTL */
/* | MPI_D_RW */
/* | MPI_D_MEM */
/* | MPI_D_CCB */
/* | MPI_D_PPR */
/* | MPI_D_RAID */
/* | MPI_D_EVT */
;
#endif
58
/* autoconf(9) driver glue: no attached devices yet, name "mpi". */
struct cfdriver mpi_cd = {
	NULL,
	"mpi",
	DV_DULL
};
64
65 void mpi_scsi_cmd(struct scsi_xfer *);
66 void mpi_scsi_cmd_done(struct mpi_ccb *);
67 int mpi_scsi_probe(struct scsi_link *);
68 int mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
69 int);
70
71 const struct scsi_adapter mpi_switch = {
72 mpi_scsi_cmd, NULL, mpi_scsi_probe, NULL, mpi_scsi_ioctl
73 };
74
75 struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t);
76 void mpi_dmamem_free(struct mpi_softc *,
77 struct mpi_dmamem *);
78 int mpi_alloc_ccbs(struct mpi_softc *);
79 void *mpi_get_ccb(void *);
80 void mpi_put_ccb(void *, void *);
81 int mpi_alloc_replies(struct mpi_softc *);
82 void mpi_push_replies(struct mpi_softc *);
83 void mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
84
85 void mpi_start(struct mpi_softc *, struct mpi_ccb *);
86 int mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
87 void mpi_poll_done(struct mpi_ccb *);
88 void mpi_reply(struct mpi_softc *, u_int32_t);
89
90 void mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
91 void mpi_wait_done(struct mpi_ccb *);
92
93 int mpi_cfg_spi_port(struct mpi_softc *);
94 void mpi_squash_ppr(struct mpi_softc *);
95 void mpi_run_ppr(struct mpi_softc *);
96 int mpi_ppr(struct mpi_softc *, struct scsi_link *,
97 struct mpi_cfg_raid_physdisk *, int, int, int);
98 int mpi_inq(struct mpi_softc *, u_int16_t, int);
99
100 int mpi_cfg_sas(struct mpi_softc *);
101 int mpi_cfg_fc(struct mpi_softc *);
102
103 void mpi_timeout_xs(void *);
104 int mpi_load_xs(struct mpi_ccb *);
105
106 u_int32_t mpi_read(struct mpi_softc *, bus_size_t);
107 void mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
108 int mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
109 u_int32_t);
110 int mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
111 u_int32_t);
112
113 int mpi_init(struct mpi_softc *);
114 int mpi_reset_soft(struct mpi_softc *);
115 int mpi_reset_hard(struct mpi_softc *);
116
117 int mpi_handshake_send(struct mpi_softc *, void *, size_t);
118 int mpi_handshake_recv_dword(struct mpi_softc *,
119 u_int32_t *);
120 int mpi_handshake_recv(struct mpi_softc *, void *, size_t);
121
122 void mpi_empty_done(struct mpi_ccb *);
123
124 int mpi_iocinit(struct mpi_softc *);
125 int mpi_iocfacts(struct mpi_softc *);
126 int mpi_portfacts(struct mpi_softc *);
127 int mpi_portenable(struct mpi_softc *);
128 int mpi_cfg_coalescing(struct mpi_softc *);
129 void mpi_get_raid(struct mpi_softc *);
130 int mpi_fwupload(struct mpi_softc *);
131 int mpi_manufacturing(struct mpi_softc *);
132 int mpi_scsi_probe_virtual(struct scsi_link *);
133
134 int mpi_eventnotify(struct mpi_softc *);
135 void mpi_eventnotify_done(struct mpi_ccb *);
136 void mpi_eventnotify_free(struct mpi_softc *,
137 struct mpi_rcb *);
138 void mpi_eventack(void *, void *);
139 void mpi_eventack_done(struct mpi_ccb *);
140 int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
141 void mpi_evt_sas_detach(void *, void *);
142 void mpi_evt_sas_detach_done(struct mpi_ccb *);
143 void mpi_fc_rescan(void *);
144
145 int mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
146 u_int8_t, u_int32_t, int, void *);
147 int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
148 void *, int, void *, size_t);
149
150 int mpi_ioctl_cache(struct scsi_link *, u_long,
151 struct dk_cache *);
152
153 #if NBIO > 0
154 int mpi_bio_get_pg0_raid(struct mpi_softc *, int);
155 int mpi_ioctl(struct device *, u_long, caddr_t);
156 int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
157 int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
158 int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
159 int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
160 #ifndef SMALL_KERNEL
161 int mpi_create_sensors(struct mpi_softc *);
162 void mpi_refresh_sensors(void *);
163 #endif /* SMALL_KERNEL */
164 #endif /* NBIO > 0 */
165
#define DEVNAME(s)		((s)->sc_dev.dv_xname)

/* size of a structure in 32-bit words */
#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* shorthands for the doorbell, interrupt and reply queue registers */
#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_REPLY_QUEUE)
#define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_REPLY_QUEUE, (v))

/* spin until the doorbell interrupt is raised / acknowledged */
#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

/* flag bits for mpi_req_cfg_header()/mpi_req_cfg_page() */
#define MPI_PG_EXTENDED		(1<<0)
#define MPI_PG_POLL		(1<<1)
#define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/* polled config-page accessors; the "e" variants use extended pages */
#define mpi_cfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL, (_h))
#define mpi_ecfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))

#define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
	    (_h), (_r), (_p), (_l))
#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
	    (_h), (_r), (_p), (_l))
202
203 static inline void
204 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
205 {
206 htolem32(&sge->sg_addr_lo, dva);
207 htolem32(&sge->sg_addr_hi, dva >> 32);
208 }
209
/*
 * Bring the IOC to the operational state and attach scsibus.
 *
 * Sequence: mask interrupts, reset/initialise the IOC, fetch IOC
 * facts, allocate request ccbs and reply buffers, send IOCINIT, wait
 * for the operational doorbell state, configure the port (with event
 * notification for SAS/FC), then hand the bus to the midlayer and,
 * for parallel SCSI, run domain validation.
 *
 * Returns 0 on success, 1 on failure; on failure everything allocated
 * here is released again via the goto cleanup labels at the bottom.
 */
int
mpi_attach(struct mpi_softc *sc)
{
	struct scsibus_attach_args	saa;
	struct mpi_ccb			*ccb;

	printf("\n");

	rw_init(&sc->sc_lock, "mpi_lock");
	task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);

	/* disable interrupts */
	mpi_write(sc, MPI_INTR_MASK,
	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);

	if (mpi_init(sc) != 0) {
		printf("%s: unable to initialise\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_alloc_ccbs(sc) != 0) {
		/* error already printed */
		return (1);
	}

	if (mpi_alloc_replies(sc) != 0) {
		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpi_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* spin until we're operational */
	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* hand the reply buffers to the hardware */
	mpi_push_replies(sc);

	if (mpi_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_cfg_coalescing(sc) != 0) {
		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
		goto free_replies;
	}

	switch (sc->sc_porttype) {
	case MPI_PORTFACTS_PORTTYPE_SAS:
		/* SAS additionally needs the hotplug detach machinery */
		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
		    mpi_evt_sas_detach, sc);
		/* FALLTHROUGH */
	case MPI_PORTFACTS_PORTTYPE_FC:
		if (mpi_eventnotify(sc) != 0) {
			printf("%s: unable to enable events\n", DEVNAME(sc));
			goto free_replies;
		}
		break;
	}

	if (mpi_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_fwupload(sc) != 0) {
		printf("%s: unable to upload firmware\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_manufacturing(sc) != 0) {
		printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc));
		goto free_replies;
	}

	/* per-transport configuration */
	switch (sc->sc_porttype) {
	case MPI_PORTFACTS_PORTTYPE_SCSI:
		if (mpi_cfg_spi_port(sc) != 0) {
			printf("%s: unable to configure spi\n", DEVNAME(sc));
			goto free_replies;
		}
		mpi_squash_ppr(sc);
		break;
	case MPI_PORTFACTS_PORTTYPE_SAS:
		if (mpi_cfg_sas(sc) != 0) {
			printf("%s: unable to configure sas\n", DEVNAME(sc));
			goto free_replies;
		}
		break;
	case MPI_PORTFACTS_PORTTYPE_FC:
		if (mpi_cfg_fc(sc) != 0) {
			printf("%s: unable to configure fc\n", DEVNAME(sc));
			goto free_replies;
		}
		break;
	}

	/* get raid pages */
	mpi_get_raid(sc);
#if NBIO > 0
	if (sc->sc_flags & MPI_F_RAID) {
		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));
		else {
			/* cache IOC page 2 (the volume list) for bio(4) */
			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
			    2, 0, &sc->sc_cfg_hdr) != 0) {
				panic("%s: can't get IOC page 2 hdr",
				    DEVNAME(sc));
			}

			/* page_length is in dwords, hence the * 4 */
			sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
			    4, M_TEMP, M_WAITOK | M_CANFAIL);
			if (sc->sc_vol_page == NULL) {
				panic("%s: can't get memory for IOC page 2, "
				    "bio disabled", DEVNAME(sc));
			}

			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
			    sc->sc_vol_page,
			    sc->sc_cfg_hdr.page_length * 4) != 0) {
				panic("%s: can't get IOC page 2", DEVNAME(sc));
			}

			/* volume entries follow the page header */
			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
			    (sc->sc_vol_page + 1);

			sc->sc_ioctl = mpi_ioctl;
		}
	}
#endif /* NBIO > 0 */

	saa.saa_adapter = &mpi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = sc->sc_target;
	saa.saa_adapter_buswidth = sc->sc_buswidth;
	saa.saa_luns = 8;
	saa.saa_openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_wwpn = sc->sc_port_wwn;
	saa.saa_wwnn = sc->sc_node_wwn;
	saa.saa_quirks = saa.saa_flags = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* do domain validation */
	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_run_ppr(sc);

	/* enable interrupts */
	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

#if NBIO > 0
#ifndef SMALL_KERNEL
	mpi_create_sensors(sc);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

	return (0);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
	mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpi_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return(1);
}
399
400 int
401 mpi_cfg_spi_port(struct mpi_softc *sc)
402 {
403 struct mpi_cfg_hdr hdr;
404 struct mpi_cfg_spi_port_pg1 port;
405
406 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
407 &hdr) != 0)
408 return (1);
409
410 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
411 return (1);
412
413 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
414 DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
415 DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
416 DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
417 letoh32(port.port_scsi_id));
418 DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
419 DEVNAME(sc), port.target_config, letoh16(port.id_config));
420
421 if (port.port_scsi_id == sc->sc_target &&
422 port.port_resp_ids == htole16(1 << sc->sc_target) &&
423 port.on_bus_timer_value != htole32(0x0))
424 return (0);
425
426 DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
427 sc->sc_target);
428 port.port_scsi_id = sc->sc_target;
429 port.port_resp_ids = htole16(1 << sc->sc_target);
430 port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
431
432 if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
433 printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
434 return (1);
435 }
436
437 return (0);
438 }
439
/*
 * Reset the requested transfer parameters (SPI device page 1) of
 * every target on the bus to async/narrow so that the later domain
 * validation pass in mpi_run_ppr() starts from a clean slate.
 * Errors are silently ignored; this is best effort.
 */
void
mpi_squash_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_dev_pg1	page;
	int				i;

	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

	for (i = 0; i < sc->sc_buswidth; i++) {
		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
		    1, i, &hdr) != 0)
			return;

		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
			return;

		DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
		    "req_offset: 0x%02x req_period: 0x%02x "
		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
		    page.req_params1, page.req_offset, page.req_period,
		    page.req_params2, letoh32(page.configuration));

		/* clear all negotiation requests back to async/narrow */
		page.req_params1 = 0x0;
		page.req_offset = 0x0;
		page.req_period = 0x0;
		page.req_params2 = 0x0;
		page.configuration = htole32(0x0);

		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
			return;
	}
}
473
474 void
475 mpi_run_ppr(struct mpi_softc *sc)
476 {
477 struct mpi_cfg_hdr hdr;
478 struct mpi_cfg_spi_port_pg0 port_pg;
479 struct mpi_cfg_ioc_pg3 *physdisk_pg;
480 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk;
481 size_t pagelen;
482 struct scsi_link *link;
483 int i, tries;
484
485 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
486 &hdr) != 0) {
487 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
488 DEVNAME(sc));
489 return;
490 }
491
492 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
493 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
494 DEVNAME(sc));
495 return;
496 }
497
498 for (i = 0; i < sc->sc_buswidth; i++) {
499 link = scsi_get_link(sc->sc_scsibus, i, 0);
500 if (link == NULL)
501 continue;
502
503 /* do not ppr volumes */
504 if (link->flags & SDEV_VIRTUAL)
505 continue;
506
507 tries = 0;
508 while (mpi_ppr(sc, link, NULL, port_pg.min_period,
509 port_pg.max_offset, tries) == EAGAIN)
510 tries++;
511 }
512
513 if ((sc->sc_flags & MPI_F_RAID) == 0)
514 return;
515
516 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
517 &hdr) != 0) {
518 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
519 "fetch ioc pg 3 header\n", DEVNAME(sc));
520 return;
521 }
522
523 pagelen = hdr.page_length * 4; /* dwords to bytes */
524 physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
525 if (physdisk_pg == NULL) {
526 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
527 "allocate ioc pg 3\n", DEVNAME(sc));
528 return;
529 }
530 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
531
532 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
533 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to "
534 "fetch ioc page 3\n", DEVNAME(sc));
535 goto out;
536 }
537
538 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
539 physdisk_pg->no_phys_disks);
540
541 for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
542 physdisk = &physdisk_list[i];
543
544 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
545 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
546 physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
547 physdisk->phys_disk_num);
548
549 if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
550 continue;
551
552 tries = 0;
553 while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
554 port_pg.max_offset, tries) == EAGAIN)
555 tries++;
556 }
557
558 out:
559 free(physdisk_pg, M_TEMP, pagelen);
560 }
561
562 int
563 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
564 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
565 {
566 struct mpi_cfg_hdr hdr0, hdr1;
567 struct mpi_cfg_spi_dev_pg0 pg0;
568 struct mpi_cfg_spi_dev_pg1 pg1;
569 u_int32_t address;
570 int id;
571 int raid = 0;
572
573 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
574 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
575 link->quirks);
576
577 if (try >= 3)
578 return (EIO);
579
580 if (physdisk == NULL) {
581 if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
582 return (EIO);
583
584 address = link->target;
585 id = link->target;
586 } else {
587 raid = 1;
588 address = (physdisk->phys_disk_bus << 8) |
589 (physdisk->phys_disk_id);
590 id = physdisk->phys_disk_num;
591 }
592
593 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
594 address, &hdr0) != 0) {
595 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
596 DEVNAME(sc));
597 return (EIO);
598 }
599
600 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
601 address, &hdr1) != 0) {
602 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
603 DEVNAME(sc));
604 return (EIO);
605 }
606
607 #ifdef MPI_DEBUG
608 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
609 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
610 DEVNAME(sc));
611 return (EIO);
612 }
613
614 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
615 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
616 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
617 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
618 #endif
619
620 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
621 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
622 DEVNAME(sc));
623 return (EIO);
624 }
625
626 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
627 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
628 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
629 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
630
631 pg1.req_params1 = 0;
632 pg1.req_offset = offset;
633 pg1.req_period = period;
634 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
635
636 if (raid || !(link->quirks & SDEV_NOSYNC)) {
637 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
638
639 switch (try) {
640 case 0: /* U320 */
641 break;
642 case 1: /* U160 */
643 pg1.req_period = 0x09;
644 break;
645 case 2: /* U80 */
646 pg1.req_period = 0x0a;
647 break;
648 }
649
650 if (pg1.req_period < 0x09) {
651 /* Ultra320: enable QAS & PACKETIZED */
652 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
653 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
654 }
655 if (pg1.req_period < 0xa) {
656 /* >= Ultra160: enable dual xfers */
657 pg1.req_params1 |=
658 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
659 }
660 }
661
662 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
663 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
664 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
665 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
666
667 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
668 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
669 DEVNAME(sc));
670 return (EIO);
671 }
672
673 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
674 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
675 DEVNAME(sc));
676 return (EIO);
677 }
678
679 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
680 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
681 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
682 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
683
684 if (mpi_inq(sc, id, raid) != 0) {
685 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
686 "target %d\n", DEVNAME(sc), link->target);
687 return (EIO);
688 }
689
690 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
691 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
692 "inquiry\n", DEVNAME(sc));
693 return (EIO);
694 }
695
696 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
697 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
698 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
699 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
700
701 if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) {
702 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
703 DEVNAME(sc));
704 return (EAGAIN);
705 }
706
707 if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
708 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
709 DEVNAME(sc));
710 return (EAGAIN);
711 }
712
713 if (lemtoh32(&pg0.information) & 0x0e) {
714 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
715 DEVNAME(sc), lemtoh32(&pg0.information));
716 return (EAGAIN);
717 }
718
719 switch(pg0.neg_period) {
720 case 0x08:
721 period = 160;
722 break;
723 case 0x09:
724 period = 80;
725 break;
726 case 0x0a:
727 period = 40;
728 break;
729 case 0x0b:
730 period = 20;
731 break;
732 case 0x0c:
733 period = 10;
734 break;
735 default:
736 period = 0;
737 break;
738 }
739
740 printf("%s: %s %d %s at %dMHz width %dbit offset %d "
741 "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
742 id, period ? "Sync" : "Async", period,
743 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
744 pg0.neg_offset,
745 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
746 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
747 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
748
749 return (0);
750 }
751
/*
 * Issue a raw INQUIRY (lun 0) at a target, or at a RAID physical disk
 * via the passthrough function when physdisk is set, so that the
 * controller performs the bus negotiation requested by a previous
 * device page 1 write.  The inquiry data itself is discarded; only
 * the negotiation side effect matters.  Returns 0 on success, 1 on
 * failure.
 */
int
mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
{
	struct mpi_ccb			*ccb;
	struct scsi_inquiry		inq;
	/* request, sge and both data buffers share the ccb's dma chunk */
	struct inq_bundle {
		struct mpi_msg_scsi_io	io;
		struct mpi_sge		sge;
		struct scsi_inquiry_data inqbuf;
		struct scsi_sense_data	sense;
	} __packed			*bundle;
	struct mpi_msg_scsi_io		*io;
	struct mpi_sge			*sge;

	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));

	memset(&inq, 0, sizeof(inq));
	inq.opcode = INQUIRY;
	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL)
		return (1);

	ccb->ccb_done = mpi_empty_done;

	bundle = ccb->ccb_cmd;
	io = &bundle->io;
	sge = &bundle->sge;

	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
	    MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = target;

	io->cdb_length = sizeof(inq);
	io->sense_buf_len = sizeof(struct scsi_sense_data);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	/*
	 * always lun 0
	 * io->lun[0] = htobe16(link->lun);
	 */

	io->direction = MPI_SCSIIO_DIR_READ;
	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;

	memcpy(io->cdb, &inq, sizeof(inq));

	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));

	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
	    offsetof(struct inq_bundle, sense));

	/*
	 * NOTE(review): the SGE length is sizeof(inq) (the 6-byte cdb)
	 * while data_length above is sizeof(struct scsi_inquiry_data);
	 * this looks inconsistent -- confirm which length the firmware
	 * honors for the transfer.
	 */
	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
	    (u_int32_t)sizeof(inq));

	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
	    offsetof(struct inq_bundle, inqbuf));

	/*
	 * NOTE(review): on poll timeout the ccb is not returned to the
	 * pool; presumably deliberate because the hardware may still own
	 * the request -- confirm.
	 */
	if (mpi_poll(sc, ccb, 5000) != 0)
		return (1);

	if (ccb->ccb_rcb != NULL)
		mpi_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);

	return (0);
}
826
/*
 * Best-effort tweak of SAS IO unit page 1: clamp the SATA queue depth
 * to 32.  Read/write failures of the page are treated as non-fatal
 * (rv stays 0); only an allocation failure returns an error (ENOMEM).
 */
int
mpi_cfg_sas(struct mpi_softc *sc)
{
	struct mpi_ecfg_hdr	ehdr;
	struct mpi_cfg_sas_iou_pg1 *pg;
	size_t			pagelen;
	int			rv = 0;

	/* controllers without the extended page are fine as-is */
	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
	    &ehdr) != 0)
		return (0);

	/* ext_page_length is in dwords */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
		goto out;

	if (pg->max_sata_q_depth != 32) {
		pg->max_sata_q_depth = 32;

		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
			goto out;
	}

out:
	free(pg, M_TEMP, pagelen);
	return (rv);
}
858
/*
 * Fetch the FC port's WWNN/WWPN for the midlayer and adjust port
 * page 1 to report errors immediately and rescan verbosely.  Returns
 * 0 on success, 1 on failure.
 */
int
mpi_cfg_fc(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr	hdr;
	struct mpi_cfg_fc_port_pg0 pg0;
	struct mpi_cfg_fc_port_pg1 pg1;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_port_wwn = letoh64(pg0.wwpn);
	sc->sc_node_wwn = letoh64(pg0.wwnn);

	/* configure port config more to our liking */
	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	/*
	 * NOTE(review): *_PORT_0_FLAGS_* constants applied to page 1;
	 * presumably the flag bits are shared across pages -- confirm
	 * against mpireg.h.
	 */
	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));

	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}
902
/*
 * Detach hook: intentionally empty, nothing is torn down yet.
 */
void
mpi_detach(struct mpi_softc *sc)
{

}
908
909 int
910 mpi_intr(void *arg)
911 {
912 struct mpi_softc *sc = arg;
913 u_int32_t reg;
914 int rv = 0;
915
916 if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
917 return (rv);
918
919 while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
920 mpi_reply(sc, reg);
921 rv = 1;
922 }
923
924 return (rv);
925 }
926
/*
 * Decode one word popped off the reply queue and complete the ccb it
 * refers to.  A reply is either an address reply (MPI_REPLY_QUEUE_ADDRESS
 * set) pointing into the reply DMA buffer, or a context reply carrying
 * the message context (ccb id) inline.
 */
void
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb		*ccb;
	struct mpi_rcb		*rcb = NULL;
	struct mpi_msg_reply	*reply = NULL;
	u_int32_t		reply_dva;
	int			id;
	int			i;

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);

	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
		/* the register holds the reply's dva shifted right by one */
		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
		    MPI_REPLY_SIZE;
		rcb = &sc->sc_rcbs[i];

		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);

		reply = rcb->rcb_reply;

		/* the message context is the originating ccb's id */
		id = lemtoh32(&reply->msg_context);
	} else {
		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
		case MPI_REPLY_QUEUE_TYPE_INIT:
			id = reg & MPI_REPLY_QUEUE_CONTEXT;
			break;

		default:
			panic("%s: unsupported context reply",
			    DEVNAME(sc));
		}
	}

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
	    DEVNAME(sc), id, reply);

	ccb = &sc->sc_ccbs[id];

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ccb->ccb_state = MPI_CCB_READY;
	/* rcb stays NULL for context replies; done handler must cope */
	ccb->ccb_rcb = rcb;

	ccb->ccb_done(ccb);
}
977
/*
 * Allocate, map and load a zeroed, physically contiguous chunk of DMA
 * memory of the given size.  Returns NULL on failure.  Release with
 * mpi_dmamem_free().
 */
struct mpi_dmamem *
mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
{
	struct mpi_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	/* one segment so the hardware sees a single contiguous range */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
	    DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);

	return (mdm);

/* unwind in reverse order of the steps above */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
1023
/*
 * Release DMA memory from mpi_dmamem_alloc(): unload the map, unmap
 * the kva, free the segment and destroy the map, in the reverse order
 * of allocation.
 */
void
mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
{
	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
1035
/*
 * Allocate the command control blocks and the DMA memory that backs
 * their request messages, carve the request area into per-ccb slices,
 * and seed the free list / iopool.  Returns 0 on success, 1 on
 * failure (with everything allocated here released again).
 */
int
mpi_alloc_ccbs(struct mpi_softc *sc)
{
	struct mpi_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SLIST_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	/* one request-sized slice of DMA memory per ccb */
	sc->sc_requests = mpi_dmamem_alloc(sc,
	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPI_DMA_KVA(sc->sc_requests);
	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);

	for (i = 0; i < sc->sc_maxcmds; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    sc->sc_max_sgl_len, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		/* the ccb id doubles as the message context in replies */
		ccb->ccb_id = i;
		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
		ccb->ccb_state = MPI_CCB_READY;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpi_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);

	return (0);

free_maps:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpi_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}
1105
1106 void *
1107 mpi_get_ccb(void *xsc)
1108 {
1109 struct mpi_softc *sc = xsc;
1110 struct mpi_ccb *ccb;
1111
1112 mtx_enter(&sc->sc_ccb_mtx);
1113 ccb = SLIST_FIRST(&sc->sc_ccb_free);
1114 if (ccb != NULL) {
1115 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1116 ccb->ccb_state = MPI_CCB_READY;
1117 }
1118 mtx_leave(&sc->sc_ccb_mtx);
1119
1120 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1121
1122 return (ccb);
1123 }
1124
1125 void
1126 mpi_put_ccb(void *xsc, void *io)
1127 {
1128 struct mpi_softc *sc = xsc;
1129 struct mpi_ccb *ccb = io;
1130
1131 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1132
1133 #ifdef DIAGNOSTIC
1134 if (ccb->ccb_state == MPI_CCB_FREE)
1135 panic("mpi_put_ccb: double free");
1136 #endif
1137
1138 ccb->ccb_state = MPI_CCB_FREE;
1139 ccb->ccb_cookie = NULL;
1140 ccb->ccb_done = NULL;
1141 memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1142 mtx_enter(&sc->sc_ccb_mtx);
1143 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1144 mtx_leave(&sc->sc_ccb_mtx);
1145 }
1146
1147 int
1148 mpi_alloc_replies(struct mpi_softc *sc)
1149 {
1150 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1151
1152 sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
1153 M_WAITOK|M_CANFAIL);
1154 if (sc->sc_rcbs == NULL)
1155 return (1);
1156
1157 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1158 if (sc->sc_replies == NULL) {
1159 free(sc->sc_rcbs, M_DEVBUF, 0);
1160 return (1);
1161 }
1162
1163 return (0);
1164 }
1165
/*
 * Hand a single reply frame back to the IOC's free fifo, making it
 * device-readable again first.
 */
void
mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
{
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
}
1173
1174 void
1175 mpi_push_replies(struct mpi_softc *sc)
1176 {
1177 struct mpi_rcb *rcb;
1178 char *kva = MPI_DMA_KVA(sc->sc_replies);
1179 int i;
1180
1181 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1182 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1183
1184 for (i = 0; i < sc->sc_repq; i++) {
1185 rcb = &sc->sc_rcbs[i];
1186
1187 rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1188 rcb->rcb_offset = MPI_REPLY_SIZE * i;
1189 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1190 MPI_REPLY_SIZE * i;
1191 mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1192 }
1193 }
1194
/*
 * Hand a prepared request frame to the IOC by writing its device
 * address to the request post queue register.
 */
void
mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mpi_msg_request *msg;

	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* tag the frame so the reply can be matched back to this ccb */
	msg = ccb->ccb_cmd;
	htolem32(&msg->msg_context, ccb->ccb_id);

	/* flush the request frame before the hardware sees its address */
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPI_CCB_QUEUED;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
}
1214
/*
 * Issue a command and busy-wait for its completion, dispatching any
 * replies that arrive in the meantime.  timeout is in milliseconds.
 * Returns 0 once the ccb has completed, non-zero on timeout.
 */
int
mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
{
	void (*done)(struct mpi_ccb *);
	void *cookie;
	int rv = 1;
	u_int32_t reg;

	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
	    timeout);

	/* borrow the completion hooks so we can spot our own completion */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpi_poll_done;
	ccb->ccb_cookie = &rv;

	mpi_start(sc, ccb);
	while (rv == 1) {
		/* 0xffffffff means the reply post fifo is empty */
		reg = mpi_pop_reply(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0) {
				printf("%s: timeout\n", DEVNAME(sc));
				/*
				 * NOTE(review): on timeout the ccb is left
				 * with mpi_poll_done/&rv installed and the
				 * original done hook is never called --
				 * confirm callers treat the ccb as lost.
				 */
				goto timeout;
			}

			delay(1000);
			continue;
		}

		/* dispatch whatever completed; it may not be our ccb */
		mpi_reply(sc, reg);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

timeout:
	return (rv);
}
1254
1255 void
1256 mpi_poll_done(struct mpi_ccb *ccb)
1257 {
1258 int *rv = ccb->ccb_cookie;
1259
1260 *rv = 0;
1261 }
1262
/*
 * Issue a command and sleep until mpi_wait_done() (invoked from the
 * reply path) signals its completion.
 */
void
mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mutex cookie = MUTEX_INITIALIZER(IPL_BIO);
	void (*done)(struct mpi_ccb *);

	/* stash the real completion hook; it is called once we wake up */
	done = ccb->ccb_done;
	ccb->ccb_done = mpi_wait_done;
	ccb->ccb_cookie = &cookie;

	/* XXX this will wait forever for the ccb to complete */

	mpi_start(sc, ccb);

	/* ccb_cookie is cleared by mpi_wait_done() under this mutex */
	mtx_enter(&cookie);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &cookie, PRIBIO, "mpiwait", INFSLP);
	mtx_leave(&cookie);

	done(ccb);
}
1284
/*
 * Completion hook used by mpi_wait(): clear the cookie under the
 * sleeper's mutex and wake the sleeper up.
 */
void
mpi_wait_done(struct mpi_ccb *ccb)
{
	struct mutex *cookie = ccb->ccb_cookie;

	mtx_enter(cookie);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(cookie);
}
1295
/*
 * scsi midlayer entry point: translate a scsi_xfer into an MPI SCSI IO
 * request and post it to the IOC.  Completion runs via
 * mpi_scsi_cmd_done().
 */
void
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpi_softc *sc = link->bus->sb_adapter_softc;
	struct mpi_ccb *ccb;
	struct mpi_ccb_bundle *mcb;
	struct mpi_msg_scsi_io *io;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	/* run without the kernel lock; it is retaken before scsi_done() */
	KERNEL_UNLOCK();

	/* CDBs larger than the request frame's cdb field cannot be sent */
	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		/* fake ILLEGAL REQUEST sense, ASC 0x20 (invalid opcode) */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		goto done;
	}

	/* the iopool reserved a ccb for this xfer before we were called */
	ccb = xs->io;

	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;

	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = link->target;

	io->cdb_length = xs->cmdlen;
	io->sense_buf_len = sizeof(xs->sense);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPI_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPI_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPI_SCSIIO_DIR_NONE;
		break;
	}

	/* non-SPI ports ignore SDEV_NOTAGS and always use simple tags */
	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
	    (link->quirks & SDEV_NOTAGS))
		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
	else
		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data lands in the bundle right after the request frame */
	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
	    offsetof(struct mpi_ccb_bundle, mcb_sense));

	if (mpi_load_xs(ccb) != 0)
		goto stuffup;

	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);

	if (xs->flags & SCSI_POLL) {
		if (mpi_poll(sc, ccb, xs->timeout) != 0)
			goto stuffup;
	} else
		mpi_start(sc, ccb);

	KERNEL_LOCK();
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	KERNEL_LOCK();
	scsi_done(xs);
}
1389
/*
 * Completion handler for SCSI IO requests: unload the data dma map,
 * translate the reply frame (if any) into xs->status/xs->error, copy
 * out autosense data and finish the xfer.  No reply frame means the
 * command completed without error.
 */
void
mpi_scsi_cmd_done(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct mpi_msg_scsi_io_error *sie;

	if (xs->datalen != 0) {
		/* sync and release the data buffer */
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	/* timeout_del */
	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		KERNEL_LOCK();
		scsi_done(xs);
		KERNEL_UNLOCK();
		return;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
	    sie->msg_length, sie->function);
	DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
	    sie->sense_buf_len, sie->msg_flags);
	DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->msg_context));
	DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->tag));

	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the IOC status onto the midlayer's xs->error codes */
	switch (lemtoh16(&sie->ioc_status)) {
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		xs->error = XS_RESET;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* autosense data was dma'd into the ccb bundle by the IOC */
	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));

	DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
	    xs->error, xs->status);

	/* give the reply frame back to the IOC */
	mpi_push_reply(sc, ccb->ccb_rcb);
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
1511
/*
 * scsi_xfer timeout handler, installed by mpi_scsi_cmd() via
 * timeout_set() but not implemented: commands are never aborted.
 */
void
mpi_timeout_xs(void *arg)
{
	/* XXX */
}
1517
/*
 * Build the scatter/gather list for the xfer's data buffer into the
 * ccb's request frame.  The first sc_first_sgl_len entries live inline
 * after the io request; longer transfers continue through chain
 * elements into further sgl runs of sc_chain_len entries, all within
 * the same frame.  Returns 0 on success, 1 if the map could not be
 * loaded.
 */
int
mpi_load_xs(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
	struct mpi_msg_scsi_io *io = &mcb->mcb_io;
	struct mpi_sge *sge = NULL;
	struct mpi_sge *nsge = &mcb->mcb_sgl[0];
	struct mpi_sge *ce = NULL, *nce;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t addr, flags;
	int i, error;

	/* no data: a single empty list-terminating element suffices */
	if (xs->datalen == 0) {
		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPI_SGE_FL_DIR_OUT;

	/* too many segments for the inline sgl: reserve a chain slot */
	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
		/* chain_offset is in 32 bit words from the frame start */
		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {

		/* reached the chain slot: emit the chain element */
		if (nsge == ce) {
			nsge++;
			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);

			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
				/* another chain will follow this run */
				nce = &nsge[sc->sc_chain_len - 1];
				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
				addr = addr << 16 |
				    sizeof(struct mpi_sge) * sc->sc_chain_len;
			} else {
				/* final run: remaining segments only */
				nce = NULL;
				addr = sizeof(struct mpi_sge) *
				    (dmap->dm_nsegs - i);
			}

			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
			    MPI_SGE_FL_SIZE_64 | addr);

			/* point the chain at the next sgl run's dva */
			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
			    ((u_int8_t *)nsge - (u_int8_t *)mcb));

			ce = nce;
		}

		DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
		    i, dmap->dm_segs[i].ds_len,
		    (u_int64_t)dmap->dm_segs[i].ds_addr);

		sge = nsge++;

		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
	    MPI_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
1601
/*
 * If this controller does RAID, check whether the probed target is a
 * RAID volume (it has a RAID volume page 0) and if so mark the link
 * SDEV_VIRTUAL.  Returns 0 unless memory for the page could not be
 * allocated.
 */
int
mpi_scsi_probe_virtual(struct scsi_link *link)
{
	struct mpi_softc *sc = link->bus->sb_adapter_softc;
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_raid_vol_pg0 *rp0;
	int len;
	int rv;

	if (!ISSET(sc->sc_flags, MPI_F_RAID))
		return (0);

	if (link->lun > 0)
		return (0);

	/* no header means this target is not a RAID volume */
	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
	    0, link->target, MPI_PG_POLL, &hdr);
	if (rv != 0)
		return (0);

	/* page_length is in 32 bit words */
	len = hdr.page_length * 4;
	rp0 = malloc(len, M_TEMP, M_NOWAIT);
	if (rp0 == NULL)
		return (ENOMEM);

	/* the page contents are not used; fetching it is the test */
	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
	if (rv == 0)
		SET(link->flags, SDEV_VIRTUAL);

	free(rp0, M_TEMP, len);
	return (0);
}
1634
/*
 * Midlayer device probe hook: flag RAID volumes as virtual and, on SAS
 * ports, read the SAS device page 0 to detect ATAPI devices.  Returns
 * non-zero only when the probe should fail.
 */
int
mpi_scsi_probe(struct scsi_link *link)
{
	struct mpi_softc *sc = link->bus->sb_adapter_softc;
	struct mpi_ecfg_hdr ehdr;
	struct mpi_cfg_sas_dev_pg0 pg0;
	u_int32_t address;
	int rv;

	rv = mpi_scsi_probe_virtual(link);
	if (rv != 0)
		return (rv);

	if (ISSET(link->flags, SDEV_VIRTUAL))
		return (0);

	/* the ATAPI check below only applies to SAS ports */
	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
		return (0);

	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;

	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
	    address, &ehdr) != 0)
		return (EIO);

	/* a target without the page is simply not flagged */
	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
	    DEVNAME(sc), link->target);
	DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n",
	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
	DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc),
	    letoh64(pg0.sas_addr));
	DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x "
	    "access_status: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
	DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x "
	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
	DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(pg0.device_info));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n",
	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
		    DEVNAME(sc), link->target);
		link->flags |= SDEV_ATAPI;
	}

	return (0);
}
1689
1690 u_int32_t
1691 mpi_read(struct mpi_softc *sc, bus_size_t r)
1692 {
1693 u_int32_t rv;
1694
1695 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1696 BUS_SPACE_BARRIER_READ);
1697 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1698
1699 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1700
1701 return (rv);
1702 }
1703
/*
 * Write a 32 bit IOC register, with a write barrier so the access is
 * posted before any later register accesses.
 */
void
mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1713
1714 int
1715 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1716 u_int32_t target)
1717 {
1718 int i;
1719
1720 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1721 mask, target);
1722
1723 for (i = 0; i < 10000; i++) {
1724 if ((mpi_read(sc, r) & mask) == target)
1725 return (0);
1726 delay(1000);
1727 }
1728
1729 return (1);
1730 }
1731
1732 int
1733 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1734 u_int32_t target)
1735 {
1736 int i;
1737
1738 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1739 mask, target);
1740
1741 for (i = 0; i < 10000; i++) {
1742 if ((mpi_read(sc, r) & mask) != target)
1743 return (0);
1744 delay(1000);
1745 }
1746
1747 return (1);
1748 }
1749
/*
 * Bring the IOC to the READY doorbell state, resetting it if it is
 * currently operational or faulted.  Returns 0 when the IOC is usable,
 * 1 when it could not be coaxed into a good state.
 */
int
mpi_init(struct mpi_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the IOC leaves the RESET state */
	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpi_read_db(sc);
	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	/* up to 5 attempts to drive the state machine to READY */
	for (i = 0; i < 5; i++) {
		switch (db & MPI_DOORBELL_STATE) {
		case MPI_DOORBELL_STATE_READY:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPI_DOORBELL_STATE_OPER:
		case MPI_DOORBELL_STATE_FAULT:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
			    "reset\n" , DEVNAME(sc));
			/* fall back to a hard reset if soft reset fails */
			if (mpi_reset_soft(sc) != 0)
				mpi_reset_hard(sc);
			break;

		case MPI_DOORBELL_STATE_RESET:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
			    MPI_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpi_read_db(sc);
	}

	return (1);
}
1800
/*
 * Ask the IOC to reset itself via the message unit reset doorbell
 * function and wait for it to reach the READY state.  Returns 0 on
 * success, 1 if the doorbell was busy or the reset did not complete.
 */
int
mpi_reset_soft(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));

	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	mpi_write_db(sc,
	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
	/* wait for the doorbell function to be accepted */
	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
		return (1);

	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_READY) != 0)
		return (1);

	return (0);
}
1821
/*
 * Hard-reset the IOC through the diagnostic register, which must first
 * be unlocked by writing the magic key sequence.  Always returns 0.
 */
int
mpi_reset_hard(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));

	/* enable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);

	/* reset ioc */
	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);

	delay(10000);

	/* disable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);

	/* restore pci bits? */

	/* firmware bits? */
	return (0);
}
1848
/*
 * Send a request to the IOC through the doorbell handshake protocol,
 * one dword at a time.  Used before the request/reply queues are set
 * up.  Returns 0 on success, 1 on any handshake failure.
 */
int
mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
		mpi_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
	    MPI_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. Wait for the interrupt and then ack it.
	 */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpi_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpi_write_db(sc, htole32(query[i]));
		if (mpi_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1891
/*
 * Receive one dword of a handshake reply.  The IOC presents it as two
 * 16 bit halves (low word first) in the doorbell data field, each
 * announced by a doorbell interrupt that must be acked.  Returns 0 on
 * success, 1 if an interrupt never arrived.
 */
int
mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpi_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
		mpi_write_intr(sc, 0);
	}

	return (0);
}
1907
/*
 * Receive a handshake reply into buf.  At most dwords are stored; any
 * additional data the IOC sends beyond that is read and discarded so
 * the handshake completes cleanly.  Returns 0 on success, 1 on any
 * handshake failure.
 */
int
mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
{
	struct mpi_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	return (0);
}
1946
/*
 * No-op ccb completion hook for callers that poll and inspect the
 * reply themselves.
 */
void
mpi_empty_done(struct mpi_ccb *ccb)
{
	/* nothing to do */
}
1952
/*
 * Fetch the IOC FACTS over the doorbell handshake (the request queues
 * do not exist yet) and derive the driver's sizing parameters from
 * them: command depth, bus width, reply queue depth and the
 * scatter/gather list geometry.  Returns 0 on success, 1 on failure.
 */
int
mpi_iocfacts(struct mpi_softc *sc)
{
	struct mpi_msg_iocfacts_request ifq;
	struct mpi_msg_iocfacts_reply ifp;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPI_FUNCTION_IOC_FACTS;
	ifq.chain_offset = 0;
	ifq.msg_flags = 0;
	/* recognisable marker; handshake replies are not queue-matched */
	ifq.msg_context = htole32(0xdeadbeef);

	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
	    DEVNAME(sc), ifp.function, ifp.msg_length,
	    ifp.msg_version_maj, ifp.msg_version_min);
	DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
	    ifp.ioc_number, ifp.header_version_maj,
	    ifp.header_version_min);
	DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.ioc_status),
	    letoh16(ifp.ioc_exceptions));
	DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
	DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
	    DEVNAME(sc), letoh16(ifp.request_frame_size),
	    letoh16(ifp.reply_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
	    letoh16(ifp.product_id));
	DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_host_mfa_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
	    "global_credits: %d\n",
	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
	    letoh16(ifp.global_credits));
	DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_sense_buffer_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
	    letoh16(ifp.current_reply_frame_size));
	DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
	    letoh32(ifp.fw_image_size));
	DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_capabilities));
	DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
	    ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev);
	DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x "
	    "addr 0x%08lx%08lx\n", DEVNAME(sc),
	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));

	sc->sc_fw_maj = ifp.fw_version_maj;
	sc->sc_fw_min = ifp.fw_version_min;
	sc->sc_fw_unit = ifp.fw_version_unit;
	sc->sc_fw_dev = ifp.fw_version_dev;

	/* global credits caps the number of concurrent commands */
	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
	sc->sc_maxchdepth = ifp.max_chain_depth;
	sc->sc_ioc_number = ifp.ioc_number;
	if (sc->sc_flags & MPI_F_SPI)
		sc->sc_buswidth = 16;
	else
		sc->sc_buswidth =
		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);

	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));

	/*
	 * you can fit sg elements on the end of the io cmd if they fit in the
	 * request frame size.
	 */
	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
	    sc->sc_first_sgl_len);

	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
	    sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
	    sc->sc_chain_len);

	/* the sgl tailing the io cmd loses an entry to the chain element. */
	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
	/* the sgl chains lose an entry for each chain element */
	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
	    sc->sc_chain_len;
	DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
	    sc->sc_max_sgl_len);

	/* XXX we're ignoring the max chain depth */

	return (0);
}
2074
/*
 * Send the IOC INIT message over the doorbell handshake, telling the
 * IOC our reply frame size, topology limits and the high 32 bits of
 * the request/sense buffer addresses.  Returns 0 on success, 1 on
 * failure.
 */
int
mpi_iocinit(struct mpi_softc *sc)
{
	struct mpi_msg_iocinit_request iiq;
	struct mpi_msg_iocinit_reply iip;
	u_int32_t hi_addr;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPI_FUNCTION_IOC_INIT;
	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;

	/* 0 encodes the maximum (256) in the message format */
	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
	iiq.max_buses = 1;

	/* recognisable marker; handshake replies are not queue-matched */
	iiq.msg_context = htole32(0xd00fd00f);

	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);

	/* high halves of the 64 bit request and sense buffer addresses */
	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);

	iiq.msg_version_maj = 0x01;
	iiq.msg_version_min = 0x02;

	iiq.hdr_version_unit = 0x0d;
	iiq.hdr_version_dev = 0x00;

	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d "
	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
	    iip.max_buses, iip.max_devices, iip.flags);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	return (0);
}
2134
/*
 * Query PORT FACTS for port 0 through the normal request queue (so the
 * queues must already be running) and record the port type and, if not
 * already set, the initiator's SCSI id.  Returns 0 on success, 1 on
 * failure.
 */
int
mpi_portfacts(struct mpi_softc *sc)
{
	struct mpi_ccb *ccb;
	struct mpi_msg_portfacts_request *pfq;
	volatile struct mpi_msg_portfacts_reply *pfp;
	int rv = 1;

	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
		    DEVNAME(sc));
		return (rv);
	}

	/* the reply is inspected here, so no completion work is needed */
	ccb->ccb_done = mpi_empty_done;
	pfq = ccb->ccb_cmd;

	pfq->function = MPI_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;

	if (mpi_poll(sc, ccb, 50000) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}
	pfp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
	    DEVNAME(sc), pfp->function, pfp->msg_length);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(pfp->ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n",
	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
	DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n",
	    DEVNAME(sc), letoh16(pfp->protocol_flags),
	    letoh16(pfp->port_scsi_id));
	DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d "
	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_persistent_ids),
	    letoh16(pfp->max_posted_cmd_buffers));
	DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_lan_buckets));

	sc->sc_porttype = pfp->port_type;
	if (sc->sc_target == -1)
		sc->sc_target = lemtoh16(&pfp->port_scsi_id);

	mpi_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2205
/*
 * mpi_cfg_coalescing: turn off reply (interrupt) coalescing if the IOC
 * currently has it enabled.
 *
 * Reads IOC config page 1; if the coalescing flag is set, clears it and
 * writes the page back.  Returns 0 on success or when coalescing was
 * already off, 1 on any config-page failure.
 */
int
mpi_cfg_coalescing(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_ioc_pg1 pg;
	u_int32_t flags;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc),
	    letoh32(pg.flags));
	DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc),
	    letoh32(pg.coalescing_timeout));
	DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n",
	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);

	flags = lemtoh32(&pg.flags);
	/* nothing to do if the IOC is not coalescing replies */
	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
		return (0);

	/* clear the bit in the page's native little-endian byte order */
	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
		    DEVNAME(sc));
		return (1);
	}

	return (0);
}
2246
/*
 * mpi_eventnotify: ask the IOC to start posting asynchronous event
 * notifications.
 *
 * The ccb allocated here is held for the life of the driver; its
 * completion handler (mpi_eventnotify_done) fires once per event.
 * Also initializes the queue, mutex and iopool handler used later to
 * acknowledge events that require it.  Returns 0 on success, 1 if no
 * ccb was available.
 */
int
mpi_eventnotify(struct mpi_softc *sc)
{
	struct mpi_ccb *ccb;
	struct mpi_msg_event_request *enq;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* set up the ack machinery before any event can arrive */
	sc->sc_evt_ccb = ccb;
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpi_eventack, sc);

	ccb->ccb_done = mpi_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enq->chain_offset = 0;
	enq->event_switch = MPI_EVENT_SWITCH_ON;

	mpi_start(sc, ccb);
	return (0);
}
2276
/*
 * mpi_eventnotify_done: completion handler for the long-lived event
 * notification ccb; runs once for every async event the IOC posts.
 *
 * Dispatches on the event code and then releases (or queues for
 * acknowledgement) the reply buffer.  The ccb itself is never returned
 * to the pool, so subsequent events keep completing on it.
 */
void
mpi_eventnotify_done(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct mpi_rcb *rcb = ccb->ccb_rcb;
	struct mpi_msg_event_reply *enp = rcb->rcb_reply;

	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));

	DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d "
	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
	    letoh16(enp->data_length));
	DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n",
	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
	DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->msg_context));
	DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(enp->ioc_status));
	DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->ioc_loginfo));
	DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event));
	DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event_context));

	switch (lemtoh32(&enp->event)) {
	/* ignore these */
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		/* events arriving before the scsibus attaches are dropped */
		if (sc->sc_scsibus == NULL)
			break;

		if (mpi_evt_sas(sc, rcb) != 0) {
			/* reply is freed later on */
			return;
		}
		break;

	case MPI_EVENT_RESCAN:
		/* rescans are only acted on for fibre channel ports */
		if (sc->sc_scsibus != NULL &&
		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
			task_add(systq, &sc->sc_evt_rescan);
		break;

	default:
		DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh32(&enp->event));
		break;
	}

	mpi_eventnotify_free(sc, rcb);
}
2332
2333 void
2334 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2335 {
2336 struct mpi_msg_event_reply *enp = rcb->rcb_reply;
2337
2338 if (enp->ack_required) {
2339 mtx_enter(&sc->sc_evt_ack_mtx);
2340 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2341 mtx_leave(&sc->sc_evt_ack_mtx);
2342 scsi_ioh_add(&sc->sc_evt_ack_handler);
2343 } else
2344 mpi_push_reply(sc, rcb);
2345 }
2346
/*
 * mpi_evt_sas: act on a SAS device status change event.
 *
 * Returns 0 when the caller may release the reply immediately, or 1
 * when ownership of the rcb has been transferred to the detach queue,
 * in which case the reply is freed (and the event acknowledged) only
 * after mpi_evt_sas_detach has run.
 */
int
mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
{
	struct mpi_evt_sas_change *ch;
	u_int8_t *data;

	/* the event payload immediately follows the fixed reply header */
	data = rcb->rcb_reply;
	data += sizeof(struct mpi_msg_event_reply);
	ch = (struct mpi_evt_sas_change *)data;

	/* only bus 0 is attached by this driver */
	if (ch->bus != 0)
		return (0);

	switch (ch->reason) {
	case MPI_EVT_SASCH_REASON_ADDED:
	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
		/* a new device appeared: ask scsi layer to probe it */
		KERNEL_LOCK();
		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
			printf("%s: unable to request attach of %d\n",
			    DEVNAME(sc), ch->target);
		}
		KERNEL_UNLOCK();
		break;

	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
		KERNEL_LOCK();
		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
		KERNEL_UNLOCK();

		/* hand the rcb to the detach handler so it can find the target */
		mtx_enter(&sc->sc_evt_scan_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_scan_mtx);
		scsi_ioh_add(&sc->sc_evt_scan_handler);

		/* we'll handle event ack later on */
		return (1);

	case MPI_EVT_SASCH_REASON_SMART_DATA:
	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
		break;
	default:
		printf("%s: unknown reason for SAS device status change: "
		    "0x%02x\n", DEVNAME(sc), ch->reason);
		break;
	}

	return (0);
}
2396
/*
 * mpi_evt_sas_detach: iopool handler that processes one queued
 * "device not responding" event.
 *
 * Pops a queued rcb, issues a TARGET RESET task management request for
 * the vanished target (completion continues in
 * mpi_evt_sas_detach_done), releases the event reply, and re-arms
 * itself if more events are pending.
 */
void
mpi_evt_sas_detach(void *cookie, void *io)
{
	struct mpi_softc *sc = cookie;
	struct mpi_ccb *ccb = io;
	struct mpi_rcb *rcb, *next;
	struct mpi_msg_event_reply *enp;
	struct mpi_evt_sas_change *ch;
	struct mpi_msg_scsi_task_request *str;

	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));

	/* dequeue one event; remember whether more are waiting */
	mtx_enter(&sc->sc_evt_scan_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_scan_mtx);

	if (rcb == NULL) {
		/* nothing queued after all; give the ccb back */
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = rcb->rcb_reply;
	/* event payload follows the reply header */
	ch = (struct mpi_evt_sas_change *)(enp + 1);

	ccb->ccb_done = mpi_evt_sas_detach_done;
	str = ccb->ccb_cmd;

	str->target_id = ch->target;
	str->bus = 0;
	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;

	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;

	/* target id has been copied out, so the reply can go back now */
	mpi_eventnotify_free(sc, rcb);

	mpi_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_scan_handler);
}
2441
/*
 * mpi_evt_sas_detach_done: completion of the target reset issued by
 * mpi_evt_sas_detach; request detach of the scsi target and recycle
 * the reply buffer and ccb.
 */
void
mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply;

	KERNEL_LOCK();
	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
	    DETACH_FORCE) != 0) {
		printf("%s: unable to request detach of %d\n",
		    DEVNAME(sc), r->target_id);
	}
	KERNEL_UNLOCK();

	mpi_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
2459
/*
 * mpi_fc_rescan: task run on systq after a fibre channel RESCAN event.
 *
 * Walks the IOC's FC device config pages to build a bitmap of live
 * targets on bus 0, then reconciles the scsibus against it: probe
 * targets that appeared, deactivate and detach targets that vanished.
 */
void
mpi_fc_rescan(void *xsc)
{
	struct mpi_softc *sc = xsc;
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_fc_device_pg0 pg;
	struct scsi_link *link;
	u_int8_t devmap[256 / NBBY];
	u_int32_t id = 0xffffff;	/* get-first page address */
	int i;

	memset(devmap, 0, sizeof(devmap));

	do {
		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
		    id, 0, &hdr) != 0) {
			printf("%s: header get for rescan of 0x%08x failed\n",
			    DEVNAME(sc), id);
			return;
		}

		memset(&pg, 0, sizeof(pg));
		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
			break;

		/* only map devices with a valid bus address on bus 0 */
		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
		    pg.current_bus == 0)
			setbit(devmap, pg.current_target_id);

		/*
		 * each page reports the next device's port id; ids above
		 * 0xff0000 terminate the walk
		 */
		id = lemtoh32(&pg.port_id);
	} while (id <= 0xff0000);

	for (i = 0; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);

		if (isset(devmap, i)) {
			/* present in fabric but not attached: probe it */
			if (link == NULL)
				scsi_probe_target(sc->sc_scsibus, i);
		} else {
			/* attached but gone from fabric: tear it down */
			if (link != NULL) {
				scsi_activate(sc->sc_scsibus, i, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, i,
				    DETACH_FORCE);
			}
		}
	}
}
2508
/*
 * mpi_eventack: iopool handler that acknowledges one queued event.
 *
 * Pops an rcb from the ack queue, builds an EVENT ACK message echoing
 * the event code and context back to the IOC, releases the reply
 * buffer, and re-arms itself if more events are waiting.
 */
void
mpi_eventack(void *cookie, void *io)
{
	struct mpi_softc *sc = cookie;
	struct mpi_ccb *ccb = io;
	struct mpi_rcb *rcb, *next;
	struct mpi_msg_event_reply *enp;
	struct mpi_msg_eventack_request *eaq;

	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));

	/* dequeue one event; remember whether more are waiting */
	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	if (rcb == NULL) {
		/* nothing queued after all; give the ccb back */
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = rcb->rcb_reply;

	ccb->ccb_done = mpi_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPI_FUNCTION_EVENT_ACK;

	/* both fields are already little endian; copy them verbatim */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	/* everything needed has been copied out of the reply */
	mpi_push_reply(sc, rcb);
	mpi_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
2549
2550 void
2551 mpi_eventack_done(struct mpi_ccb *ccb)
2552 {
2553 struct mpi_softc *sc = ccb->ccb_sc;
2554
2555 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2556
2557 mpi_push_reply(sc, ccb->ccb_rcb);
2558 scsi_io_put(&sc->sc_iopool, ccb);
2559 }
2560
/*
 * mpi_portenable: issue a PORT ENABLE message for port 0 so the IOC
 * starts serving I/O on it.  Polled; used during attach.
 * Returns 0 on success, 1 on failure.
 */
int
mpi_portenable(struct mpi_softc *sc)
{
	struct mpi_ccb *ccb;
	struct mpi_msg_portenable_request *peq;
	int rv = 0;

	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	ccb->ccb_done = mpi_empty_done;
	peq = ccb->ccb_cmd;

	peq->function = MPI_FUNCTION_PORT_ENABLE;
	peq->port_number = 0;

	if (mpi_poll(sc, ccb, 50000) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
		/*
		 * NOTE(review): the ccb is not returned to the iopool here,
		 * presumably because the IOC may still complete it later --
		 * confirm this is intentional.
		 */
		return (1);
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
		    DEVNAME(sc));
		rv = 1;
	} else
		mpi_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2599
2600 int
2601 mpi_fwupload(struct mpi_softc *sc)
2602 {
2603 struct mpi_ccb *ccb;
2604 struct {
2605 struct mpi_msg_fwupload_request req;
2606 struct mpi_sge sge;
2607 } __packed *bundle;
2608 struct mpi_msg_fwupload_reply *upp;
2609 int rv = 0;
2610
2611 if (sc->sc_fw_len == 0)
2612 return (0);
2613
2614 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2615
2616 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2617 if (sc->sc_fw == NULL) {
2618 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2619 DEVNAME(sc), sc->sc_fw_len);
2620 return (1);
2621 }
2622
2623 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2624 if (ccb == NULL) {
2625 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2626 DEVNAME(sc));
2627 goto err;
2628 }
2629
2630 ccb->ccb_done = mpi_empty_done;
2631 bundle = ccb->ccb_cmd;
2632
2633 bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2634
2635 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2636
2637 bundle->req.tce.details_length = 12;
2638 htolem32(&bundle->req.tce.image_size, sc->sc_fw_len);
2639
2640 htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2641 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2642 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2643 mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2644
2645 if (mpi_poll(sc, ccb, 50000) != 0) {
2646 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2647 goto err;
2648 }
2649
2650 if (ccb->ccb_rcb == NULL)
2651 panic("%s: unable to do fw upload", DEVNAME(sc));
2652 upp = ccb->ccb_rcb->rcb_reply;
2653
2654 if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2655 rv = 1;
2656
2657 mpi_push_reply(sc, ccb->ccb_rcb);
2658 scsi_io_put(&sc->sc_iopool, ccb);
2659
2660 return (rv);
2661
2662 err:
2663 mpi_dmamem_free(sc, sc->sc_fw);
2664 return (1);
2665 }
2666
/*
 * mpi_manufacturing: read manufacturing config page 0 and print the
 * board name together with the firmware version at attach time.
 * Returns 0 on success, 1 on failure.
 */
int
mpi_manufacturing(struct mpi_softc *sc)
{
	char board_name[33];
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_manufacturing_pg0 *pg;
	size_t pagelen;
	int rv = 1;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,
	    0, 0, &hdr) != 0)
		return (1);

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	/* reject a page too small to contain the fields read below */
	if (pagelen < sizeof(*pg))
		return (1);

	pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
	if (pg == NULL)
		return (1);

	if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0)
		goto out;

	/* copy and sanitize the fixed-width board name field */
	scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));

	printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name,
	    sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);

	rv = 0;

out:
	free(pg, M_TEMP, pagelen);
	return (rv);
}
2702
/*
 * mpi_get_raid: read IOC config page 2 and set MPI_F_RAID in sc_flags
 * if the controller advertises RAID capability.  Failures are silent
 * (the flag simply stays clear).
 */
void
mpi_get_raid(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_ioc_pg2 *vol_page;
	size_t pagelen;
	u_int32_t capabilities;

	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header"
		    "for IOC page 2\n", DEVNAME(sc));
		return;
	}

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
	if (vol_page == NULL) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
		    "space for ioc config page 2\n", DEVNAME(sc));
		return;
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
		    "page 2\n", DEVNAME(sc));
		goto out;
	}

	capabilities = lemtoh32(&vol_page->capabilities);

	DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x08%x\n", DEVNAME(sc),
	    letoh32(vol_page->capabilities));
	DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d "
	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
	    vol_page->active_vols, vol_page->max_vols,
	    vol_page->active_physdisks, vol_page->max_physdisks);

	/* don't walk list if there are no RAID capability */
	if (capabilities == 0xdeadbeef) {
		/* 0xdeadbeef is treated as a poisoned/bogus page from fw */
		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
		goto out;
	}

	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
		sc->sc_flags |= MPI_F_RAID;

out:
	free(vol_page, M_TEMP, pagelen);
}
2754
/*
 * mpi_req_cfg_header: fetch the header of a configuration page.
 *
 * type/number/address select the page.  flags may contain MPI_PG_POLL
 * (complete by polling instead of sleeping; required before interrupts
 * work) and MPI_PG_EXTENDED (the page is an extended config page, in
 * which case p points at a struct mpi_ecfg_hdr; otherwise it points at
 * a struct mpi_cfg_hdr).  The header written to *p is later passed to
 * mpi_req_cfg_page() to transfer the page itself.
 *
 * Returns 0 on success, 1 on failure.
 */
int
mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
    u_int32_t address, int flags, void *p)
{
	struct mpi_ccb *ccb;
	struct mpi_msg_config_request *cq;
	struct mpi_msg_config_reply *cp;
	struct mpi_cfg_hdr *hdr = p;
	struct mpi_ecfg_hdr *ehdr = p;
	int etype = 0;
	int rv = 0;

	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
	    address, flags, MPI_PG_FMT);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* extended pages are requested via the EXTENDED type + ext_page_type */
	if (ISSET(flags, MPI_PG_EXTENDED)) {
		etype = type;
		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	}

	cq = ccb->ccb_cmd;

	cq->function = MPI_FUNCTION_CONFIG;

	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;

	cq->config_header.page_number = number;
	cq->config_header.page_type = type;
	cq->ext_page_type = etype;
	htolem32(&cq->page_address, address);
	/* no data transfer for a header request: empty SGE */
	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);

	ccb->ccb_done = mpi_empty_done;
	if (ISSET(flags, MPI_PG_POLL)) {
		if (mpi_poll(sc, ccb, 50000) != 0) {
			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
			    DEVNAME(sc));
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on poll timeout, presumably because the
			 * IOC may still complete it -- confirm.
			 */
			return (1);
		}
	} else
		mpi_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL)
		panic("%s: unable to fetch config header", DEVNAME(sc));
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
	DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (ISSET(flags, MPI_PG_EXTENDED)) {
		/* assemble the caller's extended header from the reply */
		memset(ehdr, 0, sizeof(*ehdr));
		ehdr->page_version = cp->config_header.page_version;
		ehdr->page_number = cp->config_header.page_number;
		ehdr->page_type = cp->config_header.page_type;
		ehdr->ext_page_length = cp->ext_page_length;
		ehdr->ext_page_type = cp->ext_page_type;
	} else
		*hdr = cp->config_header;

	mpi_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
2847
2848 int
2849 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2850 void *p, int read, void *page, size_t len)
2851 {
2852 struct mpi_ccb *ccb;
2853 struct mpi_msg_config_request *cq;
2854 struct mpi_msg_config_reply *cp;
2855 struct mpi_cfg_hdr *hdr = p;
2856 struct mpi_ecfg_hdr *ehdr = p;
2857 char *kva;
2858 int page_length;
2859 int rv = 0;
2860
2861 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2862 DEVNAME(sc), address, read, hdr->page_type);
2863
2864 page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2865 lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2866
2867 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2868 len < page_length * 4)
2869 return (1);
2870
2871 ccb = scsi_io_get(&sc->sc_iopool,
2872 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2873 if (ccb == NULL) {
2874 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2875 return (1);
2876 }
2877
2878 cq = ccb->ccb_cmd;
2879
2880 cq->function = MPI_FUNCTION_CONFIG;
2881
2882 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2883 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2884
2885 if (ISSET(flags, MPI_PG_EXTENDED)) {
2886 cq->config_header.page_version = ehdr->page_version;
2887 cq->config_header.page_number = ehdr->page_number;
2888 cq->config_header.page_type = ehdr->page_type;
2889 cq->ext_page_len = ehdr->ext_page_length;
2890 cq->ext_page_type = ehdr->ext_page_type;
2891 } else
2892 cq->config_header = *hdr;
2893 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2894 htolem32(&cq->page_address, address);
2895 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2896 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2897 (page_length * 4) |
2898 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2899
2900 /* bounce the page via the request space to avoid more bus_dma games */
2901 mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2902 sizeof(struct mpi_msg_config_request));
2903
2904 kva = ccb->ccb_cmd;
2905 kva += sizeof(struct mpi_msg_config_request);
2906 if (!read)
2907 memcpy(kva, page, len);
2908
2909 ccb->ccb_done = mpi_empty_done;
2910 if (ISSET(flags, MPI_PG_POLL)) {
2911 if (mpi_poll(sc, ccb, 50000) != 0) {
2912 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2913 DEVNAME(sc));
2914 return (1);
2915 }
2916 } else
2917 mpi_wait(sc, ccb);
2918
2919 if (ccb->ccb_rcb == NULL) {
2920 scsi_io_put(&sc->sc_iopool, ccb);
2921 return (1);
2922 }
2923 cp = ccb->ccb_rcb->rcb_reply;
2924
2925 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2926 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2927 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2928 "msg_flags: 0x%02x\n", DEVNAME(sc),
2929 letoh16(cp->ext_page_length), cp->ext_page_type,
2930 cp->msg_flags);
2931 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2932 letoh32(cp->msg_context));
2933 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2934 letoh16(cp->ioc_status));
2935 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2936 letoh32(cp->ioc_loginfo));
2937 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2938 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2939 cp->config_header.page_version,
2940 cp->config_header.page_length,
2941 cp->config_header.page_number,
2942 cp->config_header.page_type);
2943
2944 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2945 rv = 1;
2946 else if (read)
2947 memcpy(page, kva, len);
2948
2949 mpi_push_reply(sc, ccb->ccb_rcb);
2950 scsi_io_put(&sc->sc_iopool, ccb);
2951
2952 return (rv);
2953 }
2954
2955 int
2956 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2957 {
2958 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2959
2960 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2961
2962 switch (cmd) {
2963 case DIOCGCACHE:
2964 case DIOCSCACHE:
2965 if (ISSET(link->flags, SDEV_VIRTUAL)) {
2966 return (mpi_ioctl_cache(link, cmd,
2967 (struct dk_cache *)addr));
2968 }
2969 break;
2970
2971 default:
2972 if (sc->sc_ioctl)
2973 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2974
2975 break;
2976 }
2977
2978 return (ENOTTY);
2979 }
2980
2981 int
2982 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2983 {
2984 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2985 struct mpi_ccb *ccb;
2986 int len, rv;
2987 struct mpi_cfg_hdr hdr;
2988 struct mpi_cfg_raid_vol_pg0 *rpg0;
2989 int enabled;
2990 struct mpi_msg_raid_action_request *req;
2991 struct mpi_msg_raid_action_reply *rep;
2992 struct mpi_raid_settings settings;
2993
2994 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
2995 link->target, MPI_PG_POLL, &hdr);
2996 if (rv != 0)
2997 return (EIO);
2998
2999 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3000 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3001 rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3002 if (rpg0 == NULL)
3003 return (ENOMEM);
3004
3005 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3006 rpg0, len) != 0) {
3007 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3008 DEVNAME(sc));
3009 rv = EIO;
3010 goto done;
3011 }
3012
3013 enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3014 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3015
3016 if (cmd == DIOCGCACHE) {
3017 dc->wrcache = enabled;
3018 dc->rdcache = 0;
3019 goto done;
3020 } /* else DIOCSCACHE */
3021
3022 if (dc->rdcache) {
3023 rv = EOPNOTSUPP;
3024 goto done;
3025 }
3026
3027 if (((dc->wrcache) ? 1 : 0) == enabled)
3028 goto done;
3029
3030 settings = rpg0->settings;
3031 if (dc->wrcache) {
3032 SET(settings.volume_settings,
3033 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3034 } else {
3035 CLR(settings.volume_settings,
3036 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3037 }
3038
3039 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3040 if (ccb == NULL) {
3041 rv = ENOMEM;
3042 goto done;
3043 }
3044
3045 req = ccb->ccb_cmd;
3046 req->function = MPI_FUNCTION_RAID_ACTION;
3047 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3048 req->vol_id = rpg0->volume_id;
3049 req->vol_bus = rpg0->volume_bus;
3050
3051 memcpy(&req->data_word, &settings, sizeof(req->data_word));
3052 ccb->ccb_done = mpi_empty_done;
3053 if (mpi_poll(sc, ccb, 50000) != 0) {
3054 rv = EIO;
3055 goto done;
3056 }
3057
3058 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3059 if (rep == NULL)
3060 panic("%s: raid volume settings change failed", DEVNAME(sc));
3061
3062 switch (lemtoh16(&rep->action_status)) {
3063 case MPI_RAID_ACTION_STATUS_OK:
3064 rv = 0;
3065 break;
3066 default:
3067 rv = EIO;
3068 break;
3069 }
3070
3071 mpi_push_reply(sc, ccb->ccb_rcb);
3072 scsi_io_put(&sc->sc_iopool, ccb);
3073
3074 done:
3075 free(rpg0, M_TEMP, len);
3076 return (rv);
3077 }
3078
3079 #if NBIO > 0
/*
 * mpi_bio_get_pg0_raid: refresh IOC page 2 and cache RAID volume page 0
 * for volume id in sc->sc_rpg0, replacing any previous cached copy.
 * Used by the bio ioctl handlers.  Returns 0 on success, EINVAL on any
 * failure (including an out-of-range id).
 */
int
mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
{
	int len, rv = EINVAL;
	u_int32_t address;
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_raid_vol_pg0 *rpg0;

	/* get IOC page 2 */
	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
	    sc->sc_cfg_hdr.page_length * 4) != 0) {
		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
		    "fetch IOC page 2\n", DEVNAME(sc));
		goto done;
	}

	/* XXX return something else than EINVAL to indicate within hs range */
	if (id > sc->sc_vol_page->active_vols) {
		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
		    "id: %d\n", DEVNAME(sc), id);
		goto done;
	}

	/* replace current buffer with new one */
	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
	if (rpg0 == NULL) {
		printf("%s: can't get memory for RAID page 0, "
		    "bio disabled\n", DEVNAME(sc));
		goto done;
	}
	if (sc->sc_rpg0)
		free(sc->sc_rpg0, M_DEVBUF, 0);
	sc->sc_rpg0 = rpg0;

	/* get raid vol page 0 */
	address = sc->sc_vol_list[id].vol_id |
	    (sc->sc_vol_list[id].vol_bus << 8);
	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    address, 0, &hdr) != 0)
		goto done;
	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
		    DEVNAME(sc));
		goto done;
	}

	rv = 0;
done:
	return (rv);
}
3132
/*
 * mpi_ioctl: bio(4) ioctl entry point; dispatches BIOC* commands to the
 * RAID query/control helpers under the softc rwlock.  BIOCALARM and
 * BIOCBLINK are accepted but are no-ops on this hardware.
 */
int
mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mpi_softc *sc = (struct mpi_softc *)dev;
	int error = 0;

	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));

	/* make sure we have bio enabled */
	if (sc->sc_ioctl != mpi_ioctl)
		return (EINVAL);

	/* serialize bio operations against each other */
	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MPI_D_IOCTL, "inq\n");
		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MPI_D_IOCTL, "vol\n");
		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MPI_D_IOCTL, "disk\n");
		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MPI_D_IOCTL, "alarm\n");
		break;

	case BIOCBLINK:
		DNPRINTF(MPI_D_IOCTL, "blink\n");
		break;

	case BIOCSETSTATE:
		DNPRINTF(MPI_D_IOCTL, "setstate\n");
		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
3185
3186 int
3187 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3188 {
3189 if (!(sc->sc_flags & MPI_F_RAID)) {
3190 bi->bi_novol = 0;
3191 bi->bi_nodisk = 0;
3192 }
3193
3194 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3195 sc->sc_cfg_hdr.page_length * 4) != 0) {
3196 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3197 "page 2\n", DEVNAME(sc));
3198 return (EINVAL);
3199 }
3200
3201 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d "
3202 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3203 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3204 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3205
3206 bi->bi_novol = sc->sc_vol_page->active_vols;
3207 bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3208 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3209
3210 return (0);
3211 }
3212
/*
 * mpi_ioctl_vol: BIOCVOL handler; fill in status, size, RAID level,
 * member count and attached device name for volume bv->bv_volid using
 * the cached RAID volume page 0 (refreshed via mpi_bio_get_pg0_raid).
 * Returns 0 or EINVAL.
 */
int
mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
{
	int i, vol, id, rv = EINVAL;
	struct device *dev;
	struct scsi_link *link;
	struct mpi_cfg_raid_vol_pg0 *rpg0;
	char *vendp;

	id = bv->bv_volid;
	if (mpi_bio_get_pg0_raid(sc, id))
		goto done;

	if (id > sc->sc_vol_page->active_vols)
		return (EINVAL); /* XXX deal with hot spares */

	rpg0 = sc->sc_rpg0;
	if (rpg0 == NULL)
		goto done;

	/* determine status */
	switch (rpg0->volume_state) {
	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* override status if scrubbing or something */
	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
		bv->bv_status = BIOC_SVREBUILD;

	/* max_lba is in 512-byte sectors */
	bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512;

	/* map the firmware RAID type onto a conventional RAID level */
	switch (sc->sc_vol_list[id].vol_type) {
	case MPI_CFG_RAID_TYPE_RAID_IS:
		bv->bv_level = 0;
		break;
	case MPI_CFG_RAID_TYPE_RAID_IME:
	case MPI_CFG_RAID_TYPE_RAID_IM:
		bv->bv_level = 1;
		break;
	case MPI_CFG_RAID_TYPE_RAID_5:
		bv->bv_level = 5;
		break;
	case MPI_CFG_RAID_TYPE_RAID_6:
		bv->bv_level = 6;
		break;
	case MPI_CFG_RAID_TYPE_RAID_10:
		bv->bv_level = 10;
		break;
	case MPI_CFG_RAID_TYPE_RAID_50:
		bv->bv_level = 50;
		break;
	default:
		bv->bv_level = -1;
	}

	bv->bv_nodisk = rpg0->num_phys_disks;

	/*
	 * walk the scsibus counting virtual (RAID volume) disks until the
	 * one matching bv_volid is found, to recover its device name
	 */
	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;

		/* skip if not a virtual disk */
		if (!(link->flags & SDEV_VIRTUAL))
			continue;

		vol++;
		/* are we it? */
		if (vol == bv->bv_volid) {
			dev = link->device_softc;
			vendp = link->inqdata.vendor;
			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
			break;
		}
	}
	rv = 0;
done:
	return (rv);
}
3305
3306 int
3307 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3308 {
3309 int pdid, id, rv = EINVAL;
3310 u_int32_t address;
3311 struct mpi_cfg_hdr hdr;
3312 struct mpi_cfg_raid_vol_pg0 *rpg0;
3313 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3314 struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3315
3316 id = bd->bd_volid;
3317 if (mpi_bio_get_pg0_raid(sc, id))
3318 goto done;
3319
3320 if (id > sc->sc_vol_page->active_vols)
3321 return (EINVAL); /* XXX deal with hot spares */
3322
3323 rpg0 = sc->sc_rpg0;
3324 if (rpg0 == NULL)
3325 goto done;
3326
3327 pdid = bd->bd_diskid;
3328 if (pdid > rpg0->num_phys_disks)
3329 goto done;
3330 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3331 physdisk += pdid;
3332
3333 /* get raid phys disk page 0 */
3334 address = physdisk->phys_disk_num;
3335 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3336 &hdr) != 0)
3337 goto done;
3338 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3339 bd->bd_status = BIOC_SDFAILED;
3340 return (0);
3341 }
3342 bd->bd_channel = pdpg0.phys_disk_bus;
3343 bd->bd_target = pdpg0.phys_disk_id;
3344 bd->bd_lun = 0;
3345 bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512;
3346 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3347
3348 switch (pdpg0.phys_disk_state) {
3349 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3350 bd->bd_status = BIOC_SDONLINE;
3351 break;
3352 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3353 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3354 bd->bd_status = BIOC_SDFAILED;
3355 break;
3356 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3357 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3358 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3359 bd->bd_status = BIOC_SDOFFLINE;
3360 break;
3361 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3362 bd->bd_status = BIOC_SDSCRUB;
3363 break;
3364 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3365 default:
3366 bd->bd_status = BIOC_SDINVALID;
3367 break;
3368 }
3369
3370 /* XXX figure this out */
3371 /* bd_serial[32]; */
3372 /* bd_procdev[16]; */
3373
3374 rv = 0;
3375 done:
3376 return (rv);
3377 }
3378
/*
 * BIOCSETSTATE handler.  Changing disk state (e.g. marking a hot
 * spare) is not implemented for mpi(4), so the ioctl is rejected as
 * unsupported.
 */
int
mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
{
	return (ENOTTY);
}
3384
3385 #ifndef SMALL_KERNEL
3386 int
3387 mpi_create_sensors(struct mpi_softc *sc)
3388 {
3389 struct device *dev;
3390 struct scsi_link *link;
3391 int i, vol, nsensors;
3392
3393 /* count volumes */
3394 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3395 link = scsi_get_link(sc->sc_scsibus, i, 0);
3396 if (link == NULL)
3397 continue;
3398 /* skip if not a virtual disk */
3399 if (!(link->flags & SDEV_VIRTUAL))
3400 continue;
3401
3402 vol++;
3403 }
3404 if (vol == 0)
3405 return (0);
3406
3407 sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3408 M_DEVBUF, M_NOWAIT | M_ZERO);
3409 if (sc->sc_sensors == NULL)
3410 return (1);
3411 nsensors = vol;
3412
3413 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3414 sizeof(sc->sc_sensordev.xname));
3415
3416 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) {
3417 link = scsi_get_link(sc->sc_scsibus, i, 0);
3418 if (link == NULL)
3419 continue;
3420 /* skip if not a virtual disk */
3421 if (!(link->flags & SDEV_VIRTUAL))
3422 continue;
3423
3424 dev = link->device_softc;
3425 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3426 sizeof(sc->sc_sensors[vol].desc));
3427 sc->sc_sensors[vol].type = SENSOR_DRIVE;
3428 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3429 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3430
3431 vol++;
3432 }
3433
3434 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3435 goto bad;
3436
3437 sensordev_install(&sc->sc_sensordev);
3438
3439 return (0);
3440
3441 bad:
3442 free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor));
3443 return (1);
3444 }
3445
3446 void
3447 mpi_refresh_sensors(void *arg)
3448 {
3449 int i, vol;
3450 struct scsi_link *link;
3451 struct mpi_softc *sc = arg;
3452 struct mpi_cfg_raid_vol_pg0 *rpg0;
3453
3454 rw_enter_write(&sc->sc_lock);
3455
3456 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3457 link = scsi_get_link(sc->sc_scsibus, i, 0);
3458 if (link == NULL)
3459 continue;
3460 /* skip if not a virtual disk */
3461 if (!(link->flags & SDEV_VIRTUAL))
3462 continue;
3463
3464 if (mpi_bio_get_pg0_raid(sc, vol))
3465 continue;
3466
3467 rpg0 = sc->sc_rpg0;
3468 if (rpg0 == NULL)
3469 goto done;
3470
3471 /* determine status */
3472 switch (rpg0->volume_state) {
3473 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3474 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3475 sc->sc_sensors[vol].status = SENSOR_S_OK;
3476 break;
3477 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3478 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3479 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3480 break;
3481 case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3482 case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3483 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3484 sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3485 break;
3486 default:
3487 sc->sc_sensors[vol].value = 0; /* unknown */
3488 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3489 }
3490
3491 /* override status if scrubbing or something */
3492 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3493 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3494 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3495 }
3496
3497 vol++;
3498 }
3499 done:
3500 rw_exit_write(&sc->sc_lock);
3501 }
3502 #endif /* SMALL_KERNEL */
3503 #endif /* NBIO > 0 */
Cache object: 90ad0fdfb737bbf2d5512d321bb47aee
|