FreeBSD/Linux Kernel Cross Reference
sys/dev/i2o/iop.c
1 /* $NetBSD: iop.c,v 1.42 2003/12/09 19:43:54 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.42 2003/12/09 19:43:54 ad Exp $");
45
46 #include "opt_i2o.h"
47 #include "iop.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/queue.h>
54 #include <sys/proc.h>
55 #include <sys/malloc.h>
56 #include <sys/ioctl.h>
57 #include <sys/endian.h>
58 #include <sys/conf.h>
59 #include <sys/kthread.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #include <machine/bus.h>
64
65 #include <dev/i2o/i2o.h>
66 #include <dev/i2o/iopio.h>
67 #include <dev/i2o/iopreg.h>
68 #include <dev/i2o/iopvar.h>
69
70 #define POLL(ms, cond) \
71 do { \
72 int i; \
73 for (i = (ms) * 10; i; i--) { \
74 if (cond) \
75 break; \
76 DELAY(100); \
77 } \
78 } while (/* CONSTCOND */0);
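/*
 * Annotation (not in the original source): POLL() re-tests `cond' every
 * 100us, for up to (ms) milliseconds in total.  A minimal usage sketch,
 * modelled on the inbound FIFO wait in iop_reset() later in this file:
 */
#if 0
	u_int32_t mfa;

	/* Wait up to 10 seconds for the inbound queue to yield an MFA. */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY)
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
#endif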
79
80 #ifdef I2ODEBUG
81 #define DPRINTF(x) printf x
82 #else
83 #define DPRINTF(x)
84 #endif
85
86 #ifdef I2OVERBOSE
87 #define IFVERBOSE(x) x
88 #define COMMENT(x) NULL
89 #else
90 #define IFVERBOSE(x)
91 #define COMMENT(x)
92 #endif
93
94 #define IOP_ICTXHASH_NBUCKETS 16
95 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
96
97 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
98
99 #define IOP_TCTX_SHIFT 12
100 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
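/*
 * Annotation (not in the original source): a transaction context packs a
 * message wrapper index into the low IOP_TCTX_SHIFT bits and a generation
 * counter above them (see iop_msg_alloc()).  Sketch of the decode that
 * iop_handle_reply() performs to reject stale replies:
 */
#if 0
	struct iop_msg *im;

	im = sc->sc_ims + (tctx & IOP_TCTX_MASK);	/* wrapper index */
	if (tctx != im->im_tctx)			/* stale generation */
		return (-1);
#endif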
101
102 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
103 static u_long iop_ictxhash;
104 static void *iop_sdh;
105 static struct i2o_systab *iop_systab;
106 static int iop_systab_size;
107
108 extern struct cfdriver iop_cd;
109
110 dev_type_open(iopopen);
111 dev_type_close(iopclose);
112 dev_type_ioctl(iopioctl);
113
114 const struct cdevsw iop_cdevsw = {
115 iopopen, iopclose, noread, nowrite, iopioctl,
116 nostop, notty, nopoll, nommap, nokqfilter,
117 };
118
119 #define IC_CONFIGURE 0x01
120 #define IC_PRIORITY 0x02
121
122 struct iop_class {
123 u_short ic_class;
124 u_short ic_flags;
125 #ifdef I2OVERBOSE
126 const char *ic_caption;
127 #endif
128 } static const iop_class[] = {
129 {
130 I2O_CLASS_EXECUTIVE,
131 0,
132 IFVERBOSE("executive")
133 },
134 {
135 I2O_CLASS_DDM,
136 0,
137 COMMENT("device driver module")
138 },
139 {
140 I2O_CLASS_RANDOM_BLOCK_STORAGE,
141 IC_CONFIGURE | IC_PRIORITY,
142 IFVERBOSE("random block storage")
143 },
144 {
145 I2O_CLASS_SEQUENTIAL_STORAGE,
146 IC_CONFIGURE | IC_PRIORITY,
147 IFVERBOSE("sequential storage")
148 },
149 {
150 I2O_CLASS_LAN,
151 IC_CONFIGURE | IC_PRIORITY,
152 IFVERBOSE("LAN port")
153 },
154 {
155 I2O_CLASS_WAN,
156 IC_CONFIGURE | IC_PRIORITY,
157 IFVERBOSE("WAN port")
158 },
159 {
160 I2O_CLASS_FIBRE_CHANNEL_PORT,
161 IC_CONFIGURE,
162 IFVERBOSE("fibrechannel port")
163 },
164 {
165 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
166 0,
167 COMMENT("fibrechannel peripheral")
168 },
169 {
170 I2O_CLASS_SCSI_PERIPHERAL,
171 0,
172 COMMENT("SCSI peripheral")
173 },
174 {
175 I2O_CLASS_ATE_PORT,
176 IC_CONFIGURE,
177 IFVERBOSE("ATE port")
178 },
179 {
180 I2O_CLASS_ATE_PERIPHERAL,
181 0,
182 COMMENT("ATE peripheral")
183 },
184 {
185 I2O_CLASS_FLOPPY_CONTROLLER,
186 IC_CONFIGURE,
187 IFVERBOSE("floppy controller")
188 },
189 {
190 I2O_CLASS_FLOPPY_DEVICE,
191 0,
192 COMMENT("floppy device")
193 },
194 {
195 I2O_CLASS_BUS_ADAPTER_PORT,
196 IC_CONFIGURE,
197 IFVERBOSE("bus adapter port" )
198 },
199 };
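/*
 * Annotation (not in the original source): the IC_CONFIGURE/IC_PRIORITY
 * flags above drive the two-pass attach in iop_reconfigure(): classes
 * flagged IC_PRIORITY (storage, LAN and WAN ports) are configured first,
 * then the remaining IC_CONFIGURE classes:
 */
#if 0
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);
#endif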
200
201 #if defined(I2ODEBUG) && defined(I2OVERBOSE)
202 static const char * const iop_status[] = {
203 "success",
204 "abort (dirty)",
205 "abort (no data transfer)",
206 "abort (partial transfer)",
207 "error (dirty)",
208 "error (no data transfer)",
209 "error (partial transfer)",
210 "undefined error code",
211 "process abort (dirty)",
212 "process abort (no data transfer)",
213 "process abort (partial transfer)",
214 "transaction error",
215 };
216 #endif
217
218 static inline u_int32_t iop_inl(struct iop_softc *, int);
219 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
220
221 static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
222 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);
223
224 static void iop_config_interrupts(struct device *);
225 static void iop_configure_devices(struct iop_softc *, int, int);
226 static void iop_devinfo(int, char *);
227 static int iop_print(void *, const char *);
228 static void iop_shutdown(void *);
229 static int iop_submatch(struct device *, struct cfdata *, void *);
230
231 static void iop_adjqparam(struct iop_softc *, int);
232 static void iop_create_reconf_thread(void *);
233 static int iop_handle_reply(struct iop_softc *, u_int32_t);
234 static int iop_hrt_get(struct iop_softc *);
235 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
236 static void iop_intr_event(struct device *, struct iop_msg *, void *);
237 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
238 u_int32_t);
239 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
240 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
241 static int iop_ofifo_init(struct iop_softc *);
242 static int iop_passthrough(struct iop_softc *, struct ioppt *,
243 struct proc *);
244 static void iop_reconf_thread(void *);
245 static void iop_release_mfa(struct iop_softc *, u_int32_t);
246 static int iop_reset(struct iop_softc *);
247 static int iop_sys_enable(struct iop_softc *);
248 static int iop_systab_set(struct iop_softc *);
249 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
250
251 #ifdef I2ODEBUG
252 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
253 #endif
254
255 static inline u_int32_t
256 iop_inl(struct iop_softc *sc, int off)
257 {
258
259 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
260 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
261 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
262 }
263
264 static inline void
265 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
266 {
267
268 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
269 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
270 BUS_SPACE_BARRIER_WRITE);
271 }
272
273 static inline u_int32_t
274 iop_inl_msg(struct iop_softc *sc, int off)
275 {
276
277 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
278 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
279 return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
280 }
281
282 static inline void
283 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
284 {
285
286 bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
287 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
288 BUS_SPACE_BARRIER_WRITE);
289 }
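/*
 * Annotation (not in the original source): the accessors above wrap
 * bus_space_read_4()/bus_space_write_4() with explicit ordering barriers.
 * A minimal sketch of how the outbound FIFO interrupt is masked and later
 * unmasked with them, as done in iop_init():
 */
#if 0
	u_int32_t mask;

	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);	/* disable */
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);	/* enable */
#endif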
290
291 /*
292 * Initialise the IOP and our interface.
293 */
294 void
295 iop_init(struct iop_softc *sc, const char *intrstr)
296 {
297 struct iop_msg *im;
298 int rv, i, j, state, nsegs;
299 u_int32_t mask;
300 char ident[64];
301
302 state = 0;
303
304 printf("I2O adapter");
305
306 if (iop_ictxhashtbl == NULL)
307 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
308 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
309
310 /* Disable interrupts at the IOP. */
311 mask = iop_inl(sc, IOP_REG_INTR_MASK);
312 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
313
314 /* Allocate a scratch DMA map for small miscellaneous shared data. */
315 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
316 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
317 printf("%s: cannot create scratch dmamap\n",
318 sc->sc_dv.dv_xname);
319 return;
320 }
321
322 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
323 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
324 printf("%s: cannot alloc scratch dmamem\n",
325 sc->sc_dv.dv_xname);
326 goto bail_out;
327 }
328 state++;
329
330 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
331 &sc->sc_scr, 0)) {
332 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
333 goto bail_out;
334 }
335 state++;
336
337 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
338 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
339 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
340 goto bail_out;
341 }
342 state++;
343
344 #ifdef I2ODEBUG
345 /* So that our debug checks don't choke. */
346 sc->sc_framesize = 128;
347 #endif
348
349 /* Reset the adapter and request status. */
350 if ((rv = iop_reset(sc)) != 0) {
351 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
352 goto bail_out;
353 }
354
355 if ((rv = iop_status_get(sc, 1)) != 0) {
356 printf("%s: not responding (get status)\n",
357 sc->sc_dv.dv_xname);
358 goto bail_out;
359 }
360
361 sc->sc_flags |= IOP_HAVESTATUS;
362 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
363 ident, sizeof(ident));
364 printf(" <%s>\n", ident);
365
366 #ifdef I2ODEBUG
367 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
368 le16toh(sc->sc_status.orgid),
369 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
370 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
371 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
372 le32toh(sc->sc_status.desiredprivmemsize),
373 le32toh(sc->sc_status.currentprivmemsize),
374 le32toh(sc->sc_status.currentprivmembase));
375 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
376 le32toh(sc->sc_status.desiredpriviosize),
377 le32toh(sc->sc_status.currentpriviosize),
378 le32toh(sc->sc_status.currentpriviobase));
379 #endif
380
381 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
382 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
383 sc->sc_maxob = IOP_MAX_OUTBOUND;
384 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
385 if (sc->sc_maxib > IOP_MAX_INBOUND)
386 sc->sc_maxib = IOP_MAX_INBOUND;
387 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
388 if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
389 sc->sc_framesize = IOP_MAX_MSG_SIZE;
390
391 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
392 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
393 printf("%s: frame size too small (%d)\n",
394 sc->sc_dv.dv_xname, sc->sc_framesize);
395 goto bail_out;
396 }
397 #endif
398
399 /* Allocate message wrappers. */
400 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
401 if (im == NULL) {
402 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
403 goto bail_out;
404 }
405 state++;
406 sc->sc_ims = im;
407 SLIST_INIT(&sc->sc_im_freelist);
408
409 for (i = 0; i < sc->sc_maxib; i++, im++) {
410 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
411 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
412 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
413 &im->im_xfer[0].ix_map);
414 if (rv != 0) {
415 printf("%s: couldn't create dmamap (%d)",
416 sc->sc_dv.dv_xname, rv);
417 goto bail_out3;
418 }
419
420 im->im_tctx = i;
421 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
422 }
423
424 /* Initialise the IOP's outbound FIFO. */
425 if (iop_ofifo_init(sc) != 0) {
426 printf("%s: unable to init oubound FIFO\n",
427 sc->sc_dv.dv_xname);
428 goto bail_out3;
429 }
430
431 /*
432 * Defer further configuration until (a) interrupts are working and
433 * (b) we have enough information to build the system table.
434 */
435 config_interrupts((struct device *)sc, iop_config_interrupts);
436
437 /* Configure shutdown hook before we start any device activity. */
438 if (iop_sdh == NULL)
439 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
440
441 /* Ensure interrupts are enabled at the IOP. */
442 mask = iop_inl(sc, IOP_REG_INTR_MASK);
443 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
444
445 if (intrstr != NULL)
446 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
447 intrstr);
448
449 #ifdef I2ODEBUG
450 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
451 sc->sc_dv.dv_xname, sc->sc_maxib,
452 le32toh(sc->sc_status.maxinboundmframes),
453 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
454 #endif
455
456 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
457 return;
458
459 bail_out3:
460 if (state > 3) {
461 for (j = 0; j < i; j++)
462 bus_dmamap_destroy(sc->sc_dmat,
463 sc->sc_ims[j].im_xfer[0].ix_map);
464 free(sc->sc_ims, M_DEVBUF);
465 }
466 bail_out:
467 if (state > 2)
468 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
469 if (state > 1)
470 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
471 if (state > 0)
472 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
473 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
474 }
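/*
 * Annotation (not in the original source): iop_init() assumes the bus
 * front-end has already mapped the shared and message-frame register
 * windows (sc_iot/sc_ioh, sc_msg_iot/sc_msg_ioh), set up sc_dmat and
 * sc_memaddr, and established its interrupt handler (typically iop_intr()).
 * A hypothetical front-end attach sketch; the function name is
 * illustrative only:
 */
#if 0
static void
iop_frontend_attach(struct iop_softc *sc, const char *intrstr)
{

	/* ...map registers, set sc_dmat/sc_memaddr, establish interrupt... */
	iop_init(sc, intrstr);
}
#endif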
475
476 /*
477 * Perform autoconfiguration tasks.
478 */
479 static void
480 iop_config_interrupts(struct device *self)
481 {
482 struct iop_attach_args ia;
483 struct iop_softc *sc, *iop;
484 struct i2o_systab_entry *ste;
485 int rv, i, niop;
486
487 sc = (struct iop_softc *)self;
488 LIST_INIT(&sc->sc_iilist);
489
490 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
491
492 if (iop_hrt_get(sc) != 0) {
493 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
494 return;
495 }
496
497 /*
498 * Build the system table.
499 */
500 if (iop_systab == NULL) {
501 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
502 if ((iop = device_lookup(&iop_cd, i)) == NULL)
503 continue;
504 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
505 continue;
506 if (iop_status_get(iop, 1) != 0) {
507 printf("%s: unable to retrieve status\n",
508 sc->sc_dv.dv_xname);
509 iop->sc_flags &= ~IOP_HAVESTATUS;
510 continue;
511 }
512 niop++;
513 }
514 if (niop == 0)
515 return;
516
517 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
518 sizeof(struct i2o_systab);
519 iop_systab_size = i;
520 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
521
522 iop_systab->numentries = niop;
523 iop_systab->version = I2O_VERSION_11;
524
525 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
526 if ((iop = device_lookup(&iop_cd, i)) == NULL)
527 continue;
528 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
529 continue;
530
531 ste->orgid = iop->sc_status.orgid;
532 ste->iopid = iop->sc_dv.dv_unit + 2;
533 ste->segnumber =
534 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
535 ste->iopcaps = iop->sc_status.iopcaps;
536 ste->inboundmsgframesize =
537 iop->sc_status.inboundmframesize;
538 ste->inboundmsgportaddresslow =
539 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
540 ste++;
541 }
542 }
543
544 /*
545 * Post the system table to the IOP and bring it to the OPERATIONAL
546 * state.
547 */
548 if (iop_systab_set(sc) != 0) {
549 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
550 return;
551 }
552 if (iop_sys_enable(sc) != 0) {
553 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
554 return;
555 }
556
557 /*
558 * Set up an event handler for this IOP.
559 */
560 sc->sc_eventii.ii_dv = self;
561 sc->sc_eventii.ii_intr = iop_intr_event;
562 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
563 sc->sc_eventii.ii_tid = I2O_TID_IOP;
564 iop_initiator_register(sc, &sc->sc_eventii);
565
566 rv = iop_util_eventreg(sc, &sc->sc_eventii,
567 I2O_EVENT_EXEC_RESOURCE_LIMITS |
568 I2O_EVENT_EXEC_CONNECTION_FAIL |
569 I2O_EVENT_EXEC_ADAPTER_FAULT |
570 I2O_EVENT_EXEC_POWER_FAIL |
571 I2O_EVENT_EXEC_RESET_PENDING |
572 I2O_EVENT_EXEC_RESET_IMMINENT |
573 I2O_EVENT_EXEC_HARDWARE_FAIL |
574 I2O_EVENT_EXEC_XCT_CHANGE |
575 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
576 I2O_EVENT_GEN_DEVICE_RESET |
577 I2O_EVENT_GEN_STATE_CHANGE |
578 I2O_EVENT_GEN_GENERAL_WARNING);
579 if (rv != 0) {
580 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
581 return;
582 }
583
584 /*
585 * Attempt to match and attach a product-specific extension.
586 */
587 ia.ia_class = I2O_CLASS_ANY;
588 ia.ia_tid = I2O_TID_IOP;
589 config_found_sm(self, &ia, iop_print, iop_submatch);
590
591 /*
592 * Start device configuration.
593 */
594 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
595 if ((rv = iop_reconfigure(sc, 0)) == -1) {
596 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
597 return;
598 }
599 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
600
601 kthread_create(iop_create_reconf_thread, sc);
602 }
603
604 /*
605 * Create the reconfiguration thread. Called after the standard kernel
606 * threads have been created.
607 */
608 static void
609 iop_create_reconf_thread(void *cookie)
610 {
611 struct iop_softc *sc;
612 int rv;
613
614 sc = cookie;
615 sc->sc_flags |= IOP_ONLINE;
616
617 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
618 "%s", sc->sc_dv.dv_xname);
619 if (rv != 0) {
620 printf("%s: unable to create reconfiguration thread (%d)",
621 sc->sc_dv.dv_xname, rv);
622 return;
623 }
624 }
625
626 /*
627 * Reconfiguration thread; listens for LCT change notification, and
628 * initiates re-configuration if received.
629 */
630 static void
631 iop_reconf_thread(void *cookie)
632 {
633 struct iop_softc *sc;
634 struct lwp *l;
635 struct i2o_lct lct;
636 u_int32_t chgind;
637 int rv;
638
639 sc = cookie;
640 chgind = sc->sc_chgind + 1;
641 l = curlwp;
642
643 for (;;) {
644 DPRINTF(("%s: async reconfig: requested 0x%08x\n",
645 sc->sc_dv.dv_xname, chgind));
646
647 PHOLD(l);
648 rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
649 PRELE(l);
650
651 DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
652 sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));
653
654 if (rv == 0 &&
655 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
656 iop_reconfigure(sc, le32toh(lct.changeindicator));
657 chgind = sc->sc_chgind + 1;
658 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
659 }
660
661 tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
662 }
663 }
664
665 /*
666 * Reconfigure: find new and removed devices.
667 */
668 int
669 iop_reconfigure(struct iop_softc *sc, u_int chgind)
670 {
671 struct iop_msg *im;
672 struct i2o_hba_bus_scan mf;
673 struct i2o_lct_entry *le;
674 struct iop_initiator *ii, *nextii;
675 int rv, tid, i;
676
677 /*
678 * If the reconfiguration request isn't the result of LCT change
679 * notification, then be more thorough: ask all bus ports to scan
680 * their busses. Wait up to 5 minutes for each bus port to complete
681 * the request.
682 */
683 if (chgind == 0) {
684 if ((rv = iop_lct_get(sc)) != 0) {
685 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
686 return (rv);
687 }
688
689 le = sc->sc_lct->entry;
690 for (i = 0; i < sc->sc_nlctent; i++, le++) {
691 if ((le16toh(le->classid) & 4095) !=
692 I2O_CLASS_BUS_ADAPTER_PORT)
693 continue;
694 tid = le16toh(le->localtid) & 4095;
695
696 im = iop_msg_alloc(sc, IM_WAIT);
697
698 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
699 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
700 mf.msgictx = IOP_ICTX;
701 mf.msgtctx = im->im_tctx;
702
703 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
704 tid));
705
706 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
707 iop_msg_free(sc, im);
708 #ifdef I2ODEBUG
709 if (rv != 0)
710 printf("%s: bus scan failed\n",
711 sc->sc_dv.dv_xname);
712 #endif
713 }
714 } else if (chgind <= sc->sc_chgind) {
715 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
716 return (0);
717 }
718
719 /* Re-read the LCT and determine if it has changed. */
720 if ((rv = iop_lct_get(sc)) != 0) {
721 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
722 return (rv);
723 }
724 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
725
726 chgind = le32toh(sc->sc_lct->changeindicator);
727 if (chgind == sc->sc_chgind) {
728 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
729 return (0);
730 }
731 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
732 sc->sc_chgind = chgind;
733
734 if (sc->sc_tidmap != NULL)
735 free(sc->sc_tidmap, M_DEVBUF);
736 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
737 M_DEVBUF, M_NOWAIT|M_ZERO);
738
739 /* Allow 1 queued command per device while we're configuring. */
740 iop_adjqparam(sc, 1);
741
742 /*
743 * Match and attach child devices. We configure high-level devices
744 * first so that any claims will propagate throughout the LCT,
745 * hopefully masking off aliased devices as a result.
746 *
747 * Re-reading the LCT at this point is a little dangerous, but we'll
748 * trust the IOP (and the operator) to behave itself...
749 */
750 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
751 IC_CONFIGURE | IC_PRIORITY);
752 if ((rv = iop_lct_get(sc)) != 0)
753 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
754 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
755 IC_CONFIGURE);
756
757 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
758 nextii = LIST_NEXT(ii, ii_list);
759
760 /* Detach devices that were configured, but are now gone. */
761 for (i = 0; i < sc->sc_nlctent; i++)
762 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
763 break;
764 if (i == sc->sc_nlctent ||
765 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
766 config_detach(ii->ii_dv, DETACH_FORCE);
767
768 /*
769 * Tell initiators that existed before the re-configuration
770 * to re-configure.
771 */
772 if (ii->ii_reconfig == NULL)
773 continue;
774 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
775 printf("%s: %s failed reconfigure (%d)\n",
776 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
777 }
778
779 /* Re-adjust queue parameters and return. */
780 if (sc->sc_nii != 0)
781 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
782 / sc->sc_nii);
783
784 return (0);
785 }
786
787 /*
788 * Configure I2O devices into the system.
789 */
790 static void
791 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
792 {
793 struct iop_attach_args ia;
794 struct iop_initiator *ii;
795 const struct i2o_lct_entry *le;
796 struct device *dv;
797 int i, j, nent;
798 u_int usertid;
799
800 nent = sc->sc_nlctent;
801 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
802 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
803
804 /* Ignore the device if it's in use. */
805 usertid = le32toh(le->usertid) & 4095;
806 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
807 continue;
808
809 ia.ia_class = le16toh(le->classid) & 4095;
810 ia.ia_tid = sc->sc_tidmap[i].it_tid;
811
812 /* Ignore uninteresting devices. */
813 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
814 if (iop_class[j].ic_class == ia.ia_class)
815 break;
816 if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
817 (iop_class[j].ic_flags & mask) != maskval)
818 continue;
819
820 /*
821 * Try to configure the device only if it's not already
822 * configured.
823 */
824 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
825 if (ia.ia_tid == ii->ii_tid) {
826 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
827 strcpy(sc->sc_tidmap[i].it_dvname,
828 ii->ii_dv->dv_xname);
829 break;
830 }
831 }
832 if (ii != NULL)
833 continue;
834
835 dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
836 if (dv != NULL) {
837 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
838 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
839 }
840 }
841 }
842
843 /*
844 * Adjust queue parameters for all child devices.
845 */
846 static void
847 iop_adjqparam(struct iop_softc *sc, int mpi)
848 {
849 struct iop_initiator *ii;
850
851 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
852 if (ii->ii_adjqparam != NULL)
853 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
854 }
855
856 static void
857 iop_devinfo(int class, char *devinfo)
858 {
859 #ifdef I2OVERBOSE
860 int i;
861
862 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
863 if (class == iop_class[i].ic_class)
864 break;
865
866 if (i == sizeof(iop_class) / sizeof(iop_class[0]))
867 sprintf(devinfo, "device (class 0x%x)", class);
868 else
869 strcpy(devinfo, iop_class[i].ic_caption);
870 #else
871
872 sprintf(devinfo, "device (class 0x%x)", class);
873 #endif
874 }
875
876 static int
877 iop_print(void *aux, const char *pnp)
878 {
879 struct iop_attach_args *ia;
880 char devinfo[256];
881
882 ia = aux;
883
884 if (pnp != NULL) {
885 iop_devinfo(ia->ia_class, devinfo);
886 aprint_normal("%s at %s", devinfo, pnp);
887 }
888 aprint_normal(" tid %d", ia->ia_tid);
889 return (UNCONF);
890 }
891
892 static int
893 iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
894 {
895 struct iop_attach_args *ia;
896
897 ia = aux;
898
899 if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
900 return (0);
901
902 return (config_match(parent, cf, aux));
903 }
904
905 /*
906 * Shut down all configured IOPs.
907 */
908 static void
909 iop_shutdown(void *junk)
910 {
911 struct iop_softc *sc;
912 int i;
913
914 printf("shutting down iop devices...");
915
916 for (i = 0; i < iop_cd.cd_ndevs; i++) {
917 if ((sc = device_lookup(&iop_cd, i)) == NULL)
918 continue;
919 if ((sc->sc_flags & IOP_ONLINE) == 0)
920 continue;
921
922 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
923 0, 5000);
924
925 if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
926 /*
927 * Some AMI firmware revisions will go to sleep and
928 * never come back after this.
929 */
930 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
931 IOP_ICTX, 0, 1000);
932 }
933 }
934
935 /* Wait. Some boards could still be flushing, stupidly enough. */
936 delay(5000*1000);
937 printf(" done\n");
938 }
939
940 /*
941 * Retrieve IOP status.
942 */
943 int
944 iop_status_get(struct iop_softc *sc, int nosleep)
945 {
946 struct i2o_exec_status_get mf;
947 struct i2o_status *st;
948 paddr_t pa;
949 int rv, i;
950
951 pa = sc->sc_scr_seg->ds_addr;
952 st = (struct i2o_status *)sc->sc_scr;
953
954 mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
955 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
956 mf.reserved[0] = 0;
957 mf.reserved[1] = 0;
958 mf.reserved[2] = 0;
959 mf.reserved[3] = 0;
960 mf.addrlow = (u_int32_t)pa;
961 mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
962 mf.length = sizeof(sc->sc_status);
963
964 memset(st, 0, sizeof(*st));
965 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
966 BUS_DMASYNC_PREREAD);
967
968 if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
969 return (rv);
970
971 for (i = 25; i != 0; i--) {
972 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
973 sizeof(*st), BUS_DMASYNC_POSTREAD);
974 if (st->syncbyte == 0xff)
975 break;
976 if (nosleep)
977 DELAY(100*1000);
978 else
979 tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
980 }
981
982 if (st->syncbyte != 0xff) {
983 printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
984 rv = EIO;
985 } else {
986 memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
987 rv = 0;
988 }
989
990 return (rv);
991 }
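/*
 * Annotation (not in the original source): the scratch page is zeroed and
 * PREREAD-synced before the request is posted, then POSTREAD-synced before
 * each test of the value the IOP writes back.  The same pattern recurs in
 * iop_ofifo_init() and iop_reset(); condensed sketch (variables as in
 * iop_reset()):
 */
#if 0
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);
	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
#endif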
992
993 /*
994 * Initialize and populate the IOP's outbound FIFO.
995 */
996 static int
997 iop_ofifo_init(struct iop_softc *sc)
998 {
999 bus_addr_t addr;
1000 bus_dma_segment_t seg;
1001 struct i2o_exec_outbound_init *mf;
1002 int i, rseg, rv;
1003 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
1004
1005 sw = (u_int32_t *)sc->sc_scr;
1006
1007 mf = (struct i2o_exec_outbound_init *)mb;
1008 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
1009 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
1010 mf->msgictx = IOP_ICTX;
1011 mf->msgtctx = 0;
1012 mf->pagesize = PAGE_SIZE;
1013 mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
1014
1015 /*
1016 * The I2O spec says that there are two SGLs: one for the status
1017 * word, and one for a list of discarded MFAs. It continues to say
1018 * that if you don't want to get the list of MFAs, an IGNORE SGL is
1019 * necessary; this isn't the case (and is in fact a bad thing).
1020 */
1021 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
1022 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
1023 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
1024 (u_int32_t)sc->sc_scr_seg->ds_addr;
1025 mb[0] += 2 << 16;
1026
1027 *sw = 0;
1028 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1029 BUS_DMASYNC_PREREAD);
1030
1031 if ((rv = iop_post(sc, mb)) != 0)
1032 return (rv);
1033
1034 POLL(5000,
1035 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1036 BUS_DMASYNC_POSTREAD),
1037 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1038
1039 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1040 printf("%s: outbound FIFO init failed (%d)\n",
1041 sc->sc_dv.dv_xname, le32toh(*sw));
1042 return (EIO);
1043 }
1044
1045 /* Allocate DMA safe memory for the reply frames. */
1046 if (sc->sc_rep_phys == 0) {
1047 sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1048
1049 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1050 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1051 if (rv != 0) {
1052 printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
1053 rv);
1054 return (rv);
1055 }
1056
1057 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1058 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1059 if (rv != 0) {
1060 printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
1061 return (rv);
1062 }
1063
1064 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1065 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1066 if (rv != 0) {
1067 printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
1068 rv);
1069 return (rv);
1070 }
1071
1072 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1073 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1074 if (rv != 0) {
1075 printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
1076 return (rv);
1077 }
1078
1079 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1080 }
1081
1082 /* Populate the outbound FIFO. */
1083 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1084 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1085 addr += sc->sc_framesize;
1086 }
1087
1088 return (0);
1089 }
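/*
 * Annotation (not in the original source): the inline SGL built above for
 * the status word is a single two-word SIMPLE element -- length and flags
 * in the first word, the physical address in the second -- with the element
 * size (2 words) added to the high half of mb[0].  Sketch of the same
 * encoding for an arbitrary buffer; `len' and `pa' are placeholders:
 */
#if 0
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = len |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] = (u_int32_t)pa;
	mb[0] += 2 << 16;
#endif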
1090
1091 /*
1092 * Read the specified number of bytes from the IOP's hardware resource table.
1093 */
1094 static int
1095 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1096 {
1097 struct iop_msg *im;
1098 int rv;
1099 struct i2o_exec_hrt_get *mf;
1100 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1101
1102 im = iop_msg_alloc(sc, IM_WAIT);
1103 mf = (struct i2o_exec_hrt_get *)mb;
1104 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1105 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1106 mf->msgictx = IOP_ICTX;
1107 mf->msgtctx = im->im_tctx;
1108
1109 iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1110 rv = iop_msg_post(sc, im, mb, 30000);
1111 iop_msg_unmap(sc, im);
1112 iop_msg_free(sc, im);
1113 return (rv);
1114 }
1115
1116 /*
1117 * Read the IOP's hardware resource table.
1118 */
1119 static int
1120 iop_hrt_get(struct iop_softc *sc)
1121 {
1122 struct i2o_hrt hrthdr, *hrt;
1123 int size, rv;
1124
1125 PHOLD(curlwp);
1126 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1127 PRELE(curlwp);
1128 if (rv != 0)
1129 return (rv);
1130
1131 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1132 le16toh(hrthdr.numentries)));
1133
1134 size = sizeof(struct i2o_hrt) +
1135 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1136 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1137
1138 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1139 free(hrt, M_DEVBUF);
1140 return (rv);
1141 }
1142
1143 if (sc->sc_hrt != NULL)
1144 free(sc->sc_hrt, M_DEVBUF);
1145 sc->sc_hrt = hrt;
1146 return (0);
1147 }
1148
1149 /*
1150 * Request the specified number of bytes from the IOP's logical
1151 * configuration table. If a change indicator is specified, this
1152 * is a verbatim notification request, so the caller must be prepared
1153 * to wait indefinitely.
1154 */
1155 static int
1156 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1157 u_int32_t chgind)
1158 {
1159 struct iop_msg *im;
1160 struct i2o_exec_lct_notify *mf;
1161 int rv;
1162 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1163
1164 im = iop_msg_alloc(sc, IM_WAIT);
1165 memset(lct, 0, size);
1166
1167 mf = (struct i2o_exec_lct_notify *)mb;
1168 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1169 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1170 mf->msgictx = IOP_ICTX;
1171 mf->msgtctx = im->im_tctx;
1172 mf->classid = I2O_CLASS_ANY;
1173 mf->changeindicator = chgind;
1174
1175 #ifdef I2ODEBUG
1176 printf("iop_lct_get0: reading LCT");
1177 if (chgind != 0)
1178 printf(" (async)");
1179 printf("\n");
1180 #endif
1181
1182 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1183 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1184 iop_msg_unmap(sc, im);
1185 iop_msg_free(sc, im);
1186 return (rv);
1187 }
1188
1189 /*
1190 * Read the IOP's logical configuration table.
1191 */
1192 int
1193 iop_lct_get(struct iop_softc *sc)
1194 {
1195 int esize, size, rv;
1196 struct i2o_lct *lct;
1197
1198 esize = le32toh(sc->sc_status.expectedlctsize);
1199 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1200 if (lct == NULL)
1201 return (ENOMEM);
1202
1203 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1204 free(lct, M_DEVBUF);
1205 return (rv);
1206 }
1207
1208 size = le16toh(lct->tablesize) << 2;
1209 if (esize != size) {
1210 free(lct, M_DEVBUF);
1211 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1212 if (lct == NULL)
1213 return (ENOMEM);
1214
1215 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1216 free(lct, M_DEVBUF);
1217 return (rv);
1218 }
1219 }
1220
1221 /* Swap in the new LCT. */
1222 if (sc->sc_lct != NULL)
1223 free(sc->sc_lct, M_DEVBUF);
1224 sc->sc_lct = lct;
1225 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1226 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1227 sizeof(struct i2o_lct_entry);
1228 return (0);
1229 }
1230
1231 /*
1232 * Post a SYS_ENABLE message to the adapter.
1233 */
1234 int
1235 iop_sys_enable(struct iop_softc *sc)
1236 {
1237 struct iop_msg *im;
1238 struct i2o_msg mf;
1239 int rv;
1240
1241 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1242
1243 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1244 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1245 mf.msgictx = IOP_ICTX;
1246 mf.msgtctx = im->im_tctx;
1247
1248 rv = iop_msg_post(sc, im, &mf, 30000);
1249 if (rv == 0) {
1250 if ((im->im_flags & IM_FAIL) != 0)
1251 rv = ENXIO;
1252 else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1253 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1254 im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1255 rv = 0;
1256 else
1257 rv = EIO;
1258 }
1259
1260 iop_msg_free(sc, im);
1261 return (rv);
1262 }
1263
1264 /*
1265 * Request the specified parameter group from the target. If an initiator
1266 * is specified (a) don't wait for the operation to complete, but instead
1267 * let the initiator's interrupt handler deal with the reply and (b) place a
1268 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1269 */
1270 int
1271 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1272 int size, struct iop_initiator *ii)
1273 {
1274 struct iop_msg *im;
1275 struct i2o_util_params_op *mf;
1276 int rv;
1277 struct iop_pgop *pgop;
1278 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1279
1280 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1281 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1282 iop_msg_free(sc, im);
1283 return (ENOMEM);
1284 }
1285 im->im_dvcontext = pgop;
1286
1287 mf = (struct i2o_util_params_op *)mb;
1288 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1289 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1290 mf->msgictx = IOP_ICTX;
1291 mf->msgtctx = im->im_tctx;
1292 mf->flags = 0;
1293
1294 pgop->olh.count = htole16(1);
1295 pgop->olh.reserved = htole16(0);
1296 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1297 pgop->oat.fieldcount = htole16(0xffff);
1298 pgop->oat.group = htole16(group);
1299
1300 if (ii == NULL)
1301 PHOLD(curlwp);
1302
1303 memset(buf, 0, size);
1304 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1305 iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1306 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1307
1308 if (ii == NULL)
1309 PRELE(curlwp);
1310
1311 /* Detect errors; let partial transfers count as success. */
1312 if (ii == NULL && rv == 0) {
1313 if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1314 im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
1315 rv = 0;
1316 else
1317 rv = (im->im_reqstatus != 0 ? EIO : 0);
1318
1319 if (rv != 0)
1320 printf("%s: FIELD_GET failed for tid %d group %d\n",
1321 sc->sc_dv.dv_xname, tid, group);
1322 }
1323
1324 if (ii == NULL || rv != 0) {
1325 iop_msg_unmap(sc, im);
1326 iop_msg_free(sc, im);
1327 free(pgop, M_DEVBUF);
1328 }
1329
1330 return (rv);
1331 }
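/*
 * Annotation (not in the original source): sketch of a synchronous caller
 * (ii == NULL) reading a whole scalar parameter group into a caller-
 * supplied buffer.  `sc', `tid' and `group' are assumed to be in scope;
 * the buffer size is a placeholder, not a value from this file:
 */
#if 0
	u_int8_t buf[128];		/* receives the group's reply data */

	if (iop_field_get_all(sc, tid, group, buf, sizeof(buf), NULL) != 0)
		printf("%s: parameter group read failed\n",
		    sc->sc_dv.dv_xname);
#endif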
1332
1333 /*
1334 * Set a single field in a scalar parameter group.
1335 */
1336 int
1337 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1338 int size, int field)
1339 {
1340 struct iop_msg *im;
1341 struct i2o_util_params_op *mf;
1342 struct iop_pgop *pgop;
1343 int rv, totsize;
1344 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1345
1346 totsize = sizeof(*pgop) + size;
1347
1348 im = iop_msg_alloc(sc, IM_WAIT);
1349 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1350 iop_msg_free(sc, im);
1351 return (ENOMEM);
1352 }
1353
1354 mf = (struct i2o_util_params_op *)mb;
1355 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1356 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1357 mf->msgictx = IOP_ICTX;
1358 mf->msgtctx = im->im_tctx;
1359 mf->flags = 0;
1360
1361 pgop->olh.count = htole16(1);
1362 pgop->olh.reserved = htole16(0);
1363 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1364 pgop->oat.fieldcount = htole16(1);
1365 pgop->oat.group = htole16(group);
1366 pgop->oat.fields[0] = htole16(field);
1367 memcpy(pgop + 1, buf, size);
1368
1369 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1370 rv = iop_msg_post(sc, im, mb, 30000);
1371 if (rv != 0)
1372 printf("%s: FIELD_SET failed for tid %d group %d\n",
1373 sc->sc_dv.dv_xname, tid, group);
1374
1375 iop_msg_unmap(sc, im);
1376 iop_msg_free(sc, im);
1377 free(pgop, M_DEVBUF);
1378 return (rv);
1379 }
1380
1381 /*
1382 * Delete all rows in a tabular parameter group.
1383 */
1384 int
1385 iop_table_clear(struct iop_softc *sc, int tid, int group)
1386 {
1387 struct iop_msg *im;
1388 struct i2o_util_params_op *mf;
1389 struct iop_pgop pgop;
1390 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1391 int rv;
1392
1393 im = iop_msg_alloc(sc, IM_WAIT);
1394
1395 mf = (struct i2o_util_params_op *)mb;
1396 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1397 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1398 mf->msgictx = IOP_ICTX;
1399 mf->msgtctx = im->im_tctx;
1400 mf->flags = 0;
1401
1402 pgop.olh.count = htole16(1);
1403 pgop.olh.reserved = htole16(0);
1404 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1405 pgop.oat.fieldcount = htole16(0);
1406 pgop.oat.group = htole16(group);
1407 pgop.oat.fields[0] = htole16(0);
1408
1409 PHOLD(curlwp);
1410 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1411 rv = iop_msg_post(sc, im, mb, 30000);
1412 if (rv != 0)
1413 printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1414 sc->sc_dv.dv_xname, tid, group);
1415
1416 iop_msg_unmap(sc, im);
1417 PRELE(curlwp);
1418 iop_msg_free(sc, im);
1419 return (rv);
1420 }
1421
1422 /*
1423 * Add a single row to a tabular parameter group. The row can have only one
1424 * field.
1425 */
1426 int
1427 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1428 int size, int row)
1429 {
1430 struct iop_msg *im;
1431 struct i2o_util_params_op *mf;
1432 struct iop_pgop *pgop;
1433 int rv, totsize;
1434 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1435
1436 totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1437
1438 im = iop_msg_alloc(sc, IM_WAIT);
1439 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1440 iop_msg_free(sc, im);
1441 return (ENOMEM);
1442 }
1443
1444 mf = (struct i2o_util_params_op *)mb;
1445 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1446 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1447 mf->msgictx = IOP_ICTX;
1448 mf->msgtctx = im->im_tctx;
1449 mf->flags = 0;
1450
1451 pgop->olh.count = htole16(1);
1452 pgop->olh.reserved = htole16(0);
1453 pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1454 pgop->oat.fieldcount = htole16(1);
1455 pgop->oat.group = htole16(group);
1456 pgop->oat.fields[0] = htole16(0); /* FieldIdx */
1457 pgop->oat.fields[1] = htole16(1); /* RowCount */
1458 pgop->oat.fields[2] = htole16(row); /* KeyValue */
1459 memcpy(&pgop->oat.fields[3], buf, size);
1460
1461 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1462 rv = iop_msg_post(sc, im, mb, 30000);
1463 if (rv != 0)
1464 printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
1465 sc->sc_dv.dv_xname, tid, group, row);
1466
1467 iop_msg_unmap(sc, im);
1468 iop_msg_free(sc, im);
1469 free(pgop, M_DEVBUF);
1470 return (rv);
1471 }
1472
1473 /*
1474 * Execute a simple command (no parameters).
1475 */
1476 int
1477 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1478 int async, int timo)
1479 {
1480 struct iop_msg *im;
1481 struct i2o_msg mf;
1482 int rv, fl;
1483
1484 fl = (async != 0 ? IM_WAIT : IM_POLL);
1485 im = iop_msg_alloc(sc, fl);
1486
1487 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1488 mf.msgfunc = I2O_MSGFUNC(tid, function);
1489 mf.msgictx = ictx;
1490 mf.msgtctx = im->im_tctx;
1491
1492 rv = iop_msg_post(sc, im, &mf, timo);
1493 iop_msg_free(sc, im);
1494 return (rv);
1495 }
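/*
 * Annotation (not in the original source): iop_shutdown() above uses this
 * to quiesce each IOP before a reboot, e.g.:
 */
#if 0
	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
	    0, 5000);
#endif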
1496
1497 /*
1498 * Post the system table to the IOP.
1499 */
1500 static int
1501 iop_systab_set(struct iop_softc *sc)
1502 {
1503 struct i2o_exec_sys_tab_set *mf;
1504 struct iop_msg *im;
1505 bus_space_handle_t bsh;
1506 bus_addr_t boo;
1507 u_int32_t mema[2], ioa[2];
1508 int rv;
1509 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1510
1511 im = iop_msg_alloc(sc, IM_WAIT);
1512
1513 mf = (struct i2o_exec_sys_tab_set *)mb;
1514 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1515 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1516 mf->msgictx = IOP_ICTX;
1517 mf->msgtctx = im->im_tctx;
1518 mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
1519 mf->segnumber = 0;
1520
1521 mema[1] = sc->sc_status.desiredprivmemsize;
1522 ioa[1] = sc->sc_status.desiredpriviosize;
1523
1524 if (mema[1] != 0) {
1525 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1526 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1527 mema[0] = htole32(boo);
1528 if (rv != 0) {
1529 printf("%s: can't alloc priv mem space, err = %d\n",
1530 sc->sc_dv.dv_xname, rv);
1531 mema[0] = 0;
1532 mema[1] = 0;
1533 }
1534 }
1535
1536 if (ioa[1] != 0) {
1537 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1538 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1539 ioa[0] = htole32(boo);
1540 if (rv != 0) {
1541 printf("%s: can't alloc priv i/o space, err = %d\n",
1542 sc->sc_dv.dv_xname, rv);
1543 ioa[0] = 0;
1544 ioa[1] = 0;
1545 }
1546 }
1547
1548 PHOLD(curlwp);
1549 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1550 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1551 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1552 rv = iop_msg_post(sc, im, mb, 5000);
1553 iop_msg_unmap(sc, im);
1554 iop_msg_free(sc, im);
1555 PRELE(curlwp);
1556 return (rv);
1557 }
1558
1559 /*
1560 * Reset the IOP. Must be called with interrupts disabled.
1561 */
1562 static int
1563 iop_reset(struct iop_softc *sc)
1564 {
1565 u_int32_t mfa, *sw;
1566 struct i2o_exec_iop_reset mf;
1567 int rv;
1568 paddr_t pa;
1569
1570 sw = (u_int32_t *)sc->sc_scr;
1571 pa = sc->sc_scr_seg->ds_addr;
1572
1573 mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1574 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1575 mf.reserved[0] = 0;
1576 mf.reserved[1] = 0;
1577 mf.reserved[2] = 0;
1578 mf.reserved[3] = 0;
1579 mf.statuslow = (u_int32_t)pa;
1580 mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1581
1582 *sw = htole32(0);
1583 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1584 BUS_DMASYNC_PREREAD);
1585
1586 if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1587 return (rv);
1588
1589 POLL(2500,
1590 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1591 BUS_DMASYNC_POSTREAD), *sw != 0));
1592 if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1593 printf("%s: reset rejected, status 0x%x\n",
1594 sc->sc_dv.dv_xname, le32toh(*sw));
1595 return (EIO);
1596 }
1597
1598 /*
1599 * IOP is now in the INIT state. Wait no more than 10 seconds for
1600 * the inbound queue to become responsive.
1601 */
1602 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1603 if (mfa == IOP_MFA_EMPTY) {
1604 printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1605 return (EIO);
1606 }
1607
1608 iop_release_mfa(sc, mfa);
1609 return (0);
1610 }
1611
1612 /*
1613 * Register a new initiator. Must be called with the configuration lock
1614 * held.
1615 */
1616 void
1617 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1618 {
1619 static int ictxgen;
1620 int s;
1621
1622 /* 0 is reserved (by us) for system messages. */
1623 ii->ii_ictx = ++ictxgen;
1624
1625 /*
1626 * `Utility initiators' don't make it onto the per-IOP initiator list
1627 * (which is used only for configuration), but do get one slot on
1628 * the inbound queue.
1629 */
1630 if ((ii->ii_flags & II_UTILITY) == 0) {
1631 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1632 sc->sc_nii++;
1633 } else
1634 sc->sc_nuii++;
1635
1636 s = splbio();
1637 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1638 splx(s);
1639 }
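/*
 * Annotation (not in the original source): sketch of initiator registration
 * as performed for the executive event handler in iop_config_interrupts();
 * a child device driver follows the same pattern from its attach routine
 * with its own interrupt hook (`mydriver_intr' is a hypothetical handler):
 */
#if 0
	ii->ii_dv = self;
	ii->ii_intr = mydriver_intr;
	ii->ii_flags = 0;		/* track state via message wrappers */
	ii->ii_tid = ia->ia_tid;
	iop_initiator_register(sc, ii);
#endif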
1640
1641 /*
1642 * Unregister an initiator. Must be called with the configuration lock
1643 * held.
1644 */
1645 void
1646 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1647 {
1648 int s;
1649
1650 if ((ii->ii_flags & II_UTILITY) == 0) {
1651 LIST_REMOVE(ii, ii_list);
1652 sc->sc_nii--;
1653 } else
1654 sc->sc_nuii--;
1655
1656 s = splbio();
1657 LIST_REMOVE(ii, ii_hash);
1658 splx(s);
1659 }
1660
1661 /*
1662 * Handle a reply frame from the IOP.
1663 */
1664 static int
1665 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1666 {
1667 struct iop_msg *im;
1668 struct i2o_reply *rb;
1669 struct i2o_fault_notify *fn;
1670 struct iop_initiator *ii;
1671 u_int off, ictx, tctx, status, size;
1672
1673 off = (int)(rmfa - sc->sc_rep_phys);
1674 rb = (struct i2o_reply *)(sc->sc_rep + off);
1675
1676 /* Perform reply queue DMA synchronisation. */
1677 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1678 sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1679 if (--sc->sc_curib != 0)
1680 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1681 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1682
1683 #ifdef I2ODEBUG
1684 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1685 panic("iop_handle_reply: 64-bit reply");
1686 #endif
1687 /*
1688 * Find the initiator.
1689 */
1690 ictx = le32toh(rb->msgictx);
1691 if (ictx == IOP_ICTX)
1692 ii = NULL;
1693 else {
1694 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1695 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1696 if (ii->ii_ictx == ictx)
1697 break;
1698 if (ii == NULL) {
1699 #ifdef I2ODEBUG
1700 iop_reply_print(sc, rb);
1701 #endif
1702 printf("%s: WARNING: bad ictx returned (%x)\n",
1703 sc->sc_dv.dv_xname, ictx);
1704 return (-1);
1705 }
1706 }
1707
1708 /*
1709 * If we received a transport failure notice, we've got to dig the
1710 * transaction context (if any) out of the original message frame,
1711 * and then release the original MFA back to the inbound FIFO.
1712 */
1713 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1714 status = I2O_STATUS_SUCCESS;
1715
1716 fn = (struct i2o_fault_notify *)rb;
1717 tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1718 iop_release_mfa(sc, fn->lowmfa);
1719 iop_tfn_print(sc, fn);
1720 } else {
1721 status = rb->reqstatus;
1722 tctx = le32toh(rb->msgtctx);
1723 }
1724
1725 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1726 /*
1727 * This initiator tracks state using message wrappers.
1728 *
1729 * Find the originating message wrapper, and if requested
1730 * notify the initiator.
1731 */
1732 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1733 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1734 (im->im_flags & IM_ALLOCED) == 0 ||
1735 tctx != im->im_tctx) {
1736 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1737 sc->sc_dv.dv_xname, tctx, im);
1738 if (im != NULL)
1739 printf("%s: flags=0x%08x tctx=0x%08x\n",
1740 sc->sc_dv.dv_xname, im->im_flags,
1741 im->im_tctx);
1742 #ifdef I2ODEBUG
1743 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1744 iop_reply_print(sc, rb);
1745 #endif
1746 return (-1);
1747 }
1748
1749 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1750 im->im_flags |= IM_FAIL;
1751
1752 #ifdef I2ODEBUG
1753 if ((im->im_flags & IM_REPLIED) != 0)
1754 panic("%s: dup reply", sc->sc_dv.dv_xname);
1755 #endif
1756 im->im_flags |= IM_REPLIED;
1757
1758 #ifdef I2ODEBUG
1759 if (status != I2O_STATUS_SUCCESS)
1760 iop_reply_print(sc, rb);
1761 #endif
1762 im->im_reqstatus = status;
1763 im->im_detstatus = le16toh(rb->detail);
1764
1765 /* Copy the reply frame, if requested. */
1766 if (im->im_rb != NULL) {
1767 size = (le32toh(rb->msgflags) >> 14) & ~3;
1768 #ifdef I2ODEBUG
1769 if (size > sc->sc_framesize)
1770 panic("iop_handle_reply: reply too large");
1771 #endif
1772 memcpy(im->im_rb, rb, size);
1773 }
1774
1775 /* Notify the initiator. */
1776 if ((im->im_flags & IM_WAIT) != 0)
1777 wakeup(im);
1778 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1779 (*ii->ii_intr)(ii->ii_dv, im, rb);
1780 } else {
1781 /*
1782 * This initiator discards message wrappers.
1783 *
1784 * Simply pass the reply frame to the initiator.
1785 */
1786 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1787 }
1788
1789 return (status);
1790 }
1791
1792 /*
1793 * Handle an interrupt from the IOP.
1794 */
1795 int
1796 iop_intr(void *arg)
1797 {
1798 struct iop_softc *sc;
1799 u_int32_t rmfa;
1800
1801 sc = arg;
1802
1803 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1804 return (0);
1805
1806 for (;;) {
1807 /* Double read to account for IOP bug. */
1808 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1809 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1810 if (rmfa == IOP_MFA_EMPTY)
1811 break;
1812 }
1813 iop_handle_reply(sc, rmfa);
1814 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1815 }
1816
1817 return (1);
1818 }
1819
1820 /*
1821 * Handle an event signalled by the executive.
1822 */
1823 static void
1824 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1825 {
1826 struct i2o_util_event_register_reply *rb;
1827 u_int event;
1828
1829 rb = reply;
1830
1831 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1832 return;
1833
1834 event = le32toh(rb->event);
1835 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1836 }
1837
1838 /*
1839 * Allocate a message wrapper.
1840 */
1841 struct iop_msg *
1842 iop_msg_alloc(struct iop_softc *sc, int flags)
1843 {
1844 struct iop_msg *im;
1845 static u_int tctxgen;
1846 int s, i;
1847
1848 #ifdef I2ODEBUG
1849 if ((flags & IM_SYSMASK) != 0)
1850 panic("iop_msg_alloc: system flags specified");
1851 #endif
1852
1853 s = splbio();
1854 im = SLIST_FIRST(&sc->sc_im_freelist);
1855 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1856 if (im == NULL)
1857 panic("iop_msg_alloc: no free wrappers");
1858 #endif
1859 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1860 splx(s);
1861
1862 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1863 tctxgen += (1 << IOP_TCTX_SHIFT);
1864 im->im_flags = flags | IM_ALLOCED;
1865 im->im_rb = NULL;
1866 i = 0;
1867 do {
1868 im->im_xfer[i++].ix_size = 0;
1869 } while (i < IOP_MAX_MSG_XFERS);
1870
1871 return (im);
1872 }
1873
1874 /*
1875 * Free a message wrapper.
1876 */
1877 void
1878 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1879 {
1880 int s;
1881
1882 #ifdef I2ODEBUG
1883 if ((im->im_flags & IM_ALLOCED) == 0)
1884 panic("iop_msg_free: wrapper not allocated");
1885 #endif
1886
1887 im->im_flags = 0;
1888 s = splbio();
1889 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1890 splx(s);
1891 }
1892
1893 /*
1894 * Map a data transfer. Write a scatter-gather list into the message frame.
1895 */
1896 int
1897 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1898 void *xferaddr, int xfersize, int out, struct proc *up)
1899 {
1900 bus_dmamap_t dm;
1901 bus_dma_segment_t *ds;
1902 struct iop_xfer *ix;
1903 u_int rv, i, nsegs, flg, off, xn;
1904 u_int32_t *p;
1905
1906 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1907 if (ix->ix_size == 0)
1908 break;
1909
1910 #ifdef I2ODEBUG
1911 if (xfersize == 0)
1912 panic("iop_msg_map: null transfer");
1913 if (xfersize > IOP_MAX_XFER)
1914 panic("iop_msg_map: transfer too large");
1915 if (xn == IOP_MAX_MSG_XFERS)
1916 panic("iop_msg_map: too many xfers");
1917 #endif
1918
1919 /*
1920 * Only the first DMA map is static.
1921 */
1922 if (xn != 0) {
1923 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1924 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1925 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1926 if (rv != 0)
1927 return (rv);
1928 }
1929
1930 dm = ix->ix_map;
1931 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1932 (up == NULL ? BUS_DMA_NOWAIT : 0));
1933 if (rv != 0)
1934 goto bad;
1935
1936 /*
1937 * How many SIMPLE SG elements can we fit in this message?
1938 */
1939 off = mb[0] >> 16;
1940 p = mb + off;
1941 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1942
1943 if (dm->dm_nsegs > nsegs) {
1944 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1945 rv = EFBIG;
1946 DPRINTF(("iop_msg_map: too many segs\n"));
1947 goto bad;
1948 }
1949
1950 nsegs = dm->dm_nsegs;
1951 xfersize = 0;
1952
1953 /*
1954 * Write out the SG list.
1955 */
1956 if (out)
1957 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1958 else
1959 flg = I2O_SGL_SIMPLE;
1960
1961 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1962 p[0] = (u_int32_t)ds->ds_len | flg;
1963 p[1] = (u_int32_t)ds->ds_addr;
1964 xfersize += ds->ds_len;
1965 }
1966
1967 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1968 p[1] = (u_int32_t)ds->ds_addr;
1969 xfersize += ds->ds_len;
1970
1971 /* Fix up the transfer record, and sync the map. */
1972 ix->ix_flags = (out ? IX_OUT : IX_IN);
1973 ix->ix_size = xfersize;
1974 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1975 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
1976
1977 /*
1978 * If this is the first xfer we've mapped for this message, adjust
1979 * the SGL offset field in the message header.
1980 */
1981 if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1982 mb[0] += (mb[0] >> 12) & 0xf0;
1983 im->im_flags |= IM_SGLOFFADJ;
1984 }
1985 mb[0] += (nsegs << 17);
1986 return (0);
1987
1988 bad:
1989 if (xn != 0)
1990 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1991 return (rv);
1992 }
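/*
 * Annotation (not in the original source): worked example of the SIMPLE
 * element capacity computed in iop_msg_map().  With a 128-byte message
 * frame (32 words) and an SGL offset of, say, 4 words,
 * ((128 >> 2) - 4) >> 1 = 14 two-word SIMPLE elements fit in the frame;
 * a DMA map with more segments than that is rejected with EFBIG.
 */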
1993
1994 /*
1995 * Map a block I/O data transfer (different in that there's only one per
1996 * message maximum, and PAGE addressing may be used). Write a
1997 * scatter-gather list into the message frame.
1998 */
1999 int
2000 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2001 void *xferaddr, int xfersize, int out)
2002 {
2003 bus_dma_segment_t *ds;
2004 bus_dmamap_t dm;
2005 struct iop_xfer *ix;
2006 u_int rv, i, nsegs, off, slen, tlen, flg;
2007 paddr_t saddr, eaddr;
2008 u_int32_t *p;
2009
2010 #ifdef I2ODEBUG
2011 if (xfersize == 0)
2012 panic("iop_msg_map_bio: null transfer");
2013 if (xfersize > IOP_MAX_XFER)
2014 panic("iop_msg_map_bio: transfer too large");
2015 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2016 panic("iop_msg_map_bio: SGLOFFADJ");
2017 #endif
2018
2019 ix = im->im_xfer;
2020 dm = ix->ix_map;
2021 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2022 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2023 if (rv != 0)
2024 return (rv);
2025
2026 off = mb[0] >> 16;
2027 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2028
2029 /*
2030 * If the transfer is highly fragmented and won't fit using SIMPLE
2031 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2032 * potentially more efficient, both for us and the IOP.
2033 */
2034 if (dm->dm_nsegs > nsegs) {
2035 nsegs = 1;
2036 p = mb + off + 1;
2037
2038 /* XXX This should be done with a bus_space flag. */
2039 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2040 slen = ds->ds_len;
2041 saddr = ds->ds_addr;
2042
2043 while (slen > 0) {
2044 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2045 tlen = min(eaddr - saddr, slen);
2046 slen -= tlen;
2047 *p++ = le32toh(saddr);
2048 saddr = eaddr;
2049 nsegs++;
2050 }
2051 }
2052
2053 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2054 I2O_SGL_END;
2055 if (out)
2056 mb[off] |= I2O_SGL_DATA_OUT;
2057 } else {
2058 p = mb + off;
2059 nsegs = dm->dm_nsegs;
2060
2061 if (out)
2062 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2063 else
2064 flg = I2O_SGL_SIMPLE;
2065
2066 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2067 p[0] = (u_int32_t)ds->ds_len | flg;
2068 p[1] = (u_int32_t)ds->ds_addr;
2069 }
2070
2071 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2072 I2O_SGL_END;
2073 p[1] = (u_int32_t)ds->ds_addr;
2074 nsegs <<= 1;
2075 }
2076
2077 /* Fix up the transfer record, and sync the map. */
2078 ix->ix_flags = (out ? IX_OUT : IX_IN);
2079 ix->ix_size = xfersize;
2080 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2081 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
2082
2083 /*
2084 * Adjust the SGL offset and total message size fields. We don't
2085 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2086 */
2087 mb[0] += ((off << 4) + (nsegs << 16));
2088 return (0);
2089 }
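
/*
 * Editor's note: the two branches above produce SGLs of the following
 * shapes (word offsets are relative to the start of the SGL):
 *
 *	PAGE_LIST:	word 0		xfersize | I2O_SGL_PAGE_LIST |
 *				I2O_SGL_END_BUFFER | I2O_SGL_END
 *				[| I2O_SGL_DATA_OUT]
 *			word 1..n	one physical page address per page
 *
 *	SIMPLE:		two words per DMA segment (flags/length, address),
 *			with I2O_SGL_END_BUFFER | I2O_SGL_END set on the
 *			final element.
 */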
2090
2091 /*
2092 * Unmap all data transfers associated with a message wrapper.
2093 */
2094 void
2095 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2096 {
2097 struct iop_xfer *ix;
2098 int i;
2099
2100 #ifdef I2ODEBUG
2101 if (im->im_xfer[0].ix_size == 0)
2102 panic("iop_msg_unmap: no transfers mapped");
2103 #endif
2104
2105 for (ix = im->im_xfer, i = 0;;) {
2106 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2107 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2108 BUS_DMASYNC_POSTREAD);
2109 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2110
2111 /* Only the first DMA map is static. */
2112 if (i != 0)
2113 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2114 if (++i >= IOP_MAX_MSG_XFERS)
2115 break;
2116 if ((++ix)->ix_size == 0)
2117 break;
2118 }
2119 }
2120
2121 /*
2122 * Post a message frame to the IOP's inbound queue.
2123 */
2124 int
2125 iop_post(struct iop_softc *sc, u_int32_t *mb)
2126 {
2127 u_int32_t mfa;
2128 int s;
2129
2130 #ifdef I2ODEBUG
2131 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2132 panic("iop_post: frame too large");
2133 #endif
2134
2135 s = splbio();
2136
2137 /* Allocate a slot with the IOP. */
2138 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2139 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2140 splx(s);
2141 printf("%s: mfa not forthcoming\n",
2142 sc->sc_dv.dv_xname);
2143 return (EAGAIN);
2144 }
2145
2146 /* Perform reply buffer DMA synchronisation. */
2147 if (sc->sc_curib++ == 0)
2148 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2149 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2150
2151 /* Copy out the message frame. */
2152 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2153 mb[0] >> 16);
2154 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2155 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2156
2157 /* Post the MFA back to the IOP. */
2158 iop_outl(sc, IOP_REG_IFIFO, mfa);
2159
2160 splx(s);
2161 return (0);
2162 }
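
/*
 * Editor's note: a minimal sketch of building a frame and handing it to
 * iop_post().  The four-word UTIL NOP mirrors the frame that
 * iop_release_mfa() constructs later in this file; the function name is
 * hypothetical and the frame is illustrative only.
 */
#if 0
static int
iop_post_nop(struct iop_softc *sc)
{
	u_int32_t mb[4];

	mb[0] = I2O_VERSION_11 | (4 << 16);		/* version, 4-word frame */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);	/* target and function */
	mb[2] = 0;					/* initiator context */
	mb[3] = 0;					/* transaction context */

	return (iop_post(sc, mb));
}
#endif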
2163
2164 /*
2165 * Post a message to the IOP and deal with completion.
2166 */
2167 int
2168 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2169 {
2170 u_int32_t *mb;
2171 int rv, s;
2172
2173 mb = xmb;
2174
2175 /* Terminate the scatter/gather list chain. */
2176 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2177 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2178
2179 if ((rv = iop_post(sc, mb)) != 0)
2180 return (rv);
2181
2182 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2183 if ((im->im_flags & IM_POLL) != 0)
2184 iop_msg_poll(sc, im, timo);
2185 else
2186 iop_msg_wait(sc, im, timo);
2187
2188 s = splbio();
2189 if ((im->im_flags & IM_REPLIED) != 0) {
2190 if ((im->im_flags & IM_NOSTATUS) != 0)
2191 rv = 0;
2192 else if ((im->im_flags & IM_FAIL) != 0)
2193 rv = ENXIO;
2194 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2195 rv = EIO;
2196 else
2197 rv = 0;
2198 } else
2199 rv = EBUSY;
2200 splx(s);
2201 } else
2202 rv = 0;
2203
2204 return (rv);
2205 }
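
/*
 * Editor's note: callers normally bracket iop_msg_post() with
 * iop_msg_alloc()/iop_msg_free(), as iop_util_claim() and iop_passthrough()
 * do later in this file.  For IM_POLL/IM_WAIT messages the routine above
 * returns 0 on success, ENXIO on a transport failure (IM_FAIL), EIO when
 * the reply carries a request status other than I2O_STATUS_SUCCESS (unless
 * IM_NOSTATUS suppresses the status checks), and EBUSY when no reply
 * arrived within the timeout.
 */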
2206
2207 /*
2208 * Spin until the specified message is replied to.
2209 */
2210 static void
2211 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2212 {
2213 u_int32_t rmfa;
2214 int s;
2215
2216 s = splbio();
2217
2218 /* Wait for completion. */
2219 for (timo *= 10; timo != 0; timo--) {
2220 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2221 /* Double read to account for IOP bug. */
2222 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2223 if (rmfa == IOP_MFA_EMPTY)
2224 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2225 if (rmfa != IOP_MFA_EMPTY) {
2226 iop_handle_reply(sc, rmfa);
2227
2228 /*
2229 * Return the reply frame to the IOP's
2230 * outbound FIFO.
2231 */
2232 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2233 }
2234 }
2235 if ((im->im_flags & IM_REPLIED) != 0)
2236 break;
2237 DELAY(100);
2238 }
2239
2240 if (timo == 0) {
2241 #ifdef I2ODEBUG
2242 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2243 if (iop_status_get(sc, 1) != 0)
2244 printf("iop_msg_poll: unable to retrieve status\n");
2245 else
2246 printf("iop_msg_poll: IOP state = %d\n",
2247 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2248 #endif
2249 }
2250
2251 splx(s);
2252 }
2253
2254 /*
2255 * Sleep until the specified message is replied to.
2256 */
2257 static void
2258 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2259 {
2260 int s, rv;
2261
2262 s = splbio();
2263 if ((im->im_flags & IM_REPLIED) != 0) {
2264 splx(s);
2265 return;
2266 }
2267 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2268 splx(s);
2269
2270 #ifdef I2ODEBUG
2271 if (rv != 0) {
2272 printf("iop_msg_wait: tsleep() == %d\n", rv);
2273 if (iop_status_get(sc, 0) != 0)
2274 printf("iop_msg_wait: unable to retrieve status\n");
2275 else
2276 printf("iop_msg_wait: IOP state = %d\n",
2277 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2278 }
2279 #endif
2280 }
2281
2282 /*
2283 * Release an unused message frame back to the IOP's inbound fifo.
2284 */
2285 static void
2286 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2287 {
2288
2289 /* Use the frame to issue a no-op. */
2290 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2291 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2292 iop_outl_msg(sc, mfa + 8, 0);
2293 iop_outl_msg(sc, mfa + 12, 0);
2294
2295 iop_outl(sc, IOP_REG_IFIFO, mfa);
2296 }
2297
2298 #ifdef I2ODEBUG
2299 /*
2300 * Dump a reply frame header.
2301 */
2302 static void
2303 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2304 {
2305 u_int function, detail;
2306 #ifdef I2OVERBOSE
2307 const char *statusstr;
2308 #endif
2309
2310 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2311 detail = le16toh(rb->detail);
2312
2313 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2314
2315 #ifdef I2OVERBOSE
2316 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2317 statusstr = iop_status[rb->reqstatus];
2318 else
2319 statusstr = "undefined error code";
2320
2321 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2322 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2323 #else
2324 printf("%s: function=0x%02x status=0x%02x\n",
2325 sc->sc_dv.dv_xname, function, rb->reqstatus);
2326 #endif
2327 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2328 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2329 le32toh(rb->msgtctx));
2330 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2331 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2332 (le32toh(rb->msgflags) >> 8) & 0xff);
2333 }
2334 #endif
2335
2336 /*
2337 * Dump a transport failure reply.
2338 */
2339 static void
2340 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2341 {
2342
2343 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2344
2345 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2346 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2347 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2348 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2349 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2350 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2351 }
2352
2353 /*
2354 * Translate an I2O ASCII field into a C string.
2355 */
2356 void
2357 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2358 {
2359 int hc, lc, i, nit;
2360
2361 dlen--;
2362 lc = 0;
2363 hc = 0;
2364 i = 0;
2365
2366 /*
2367 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2368 * spec has nothing to say about it. Since AMI fields are usually filled
2369 * with junk after the terminator, stop at NUL unless the IOP is from DPT.
2370 */
2371 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2372
2373 while (slen-- != 0 && dlen-- != 0) {
2374 if (nit && *src == '\0')
2375 break;
2376 else if (*src <= 0x20 || *src >= 0x7f) {
2377 if (hc)
2378 dst[i++] = ' ';
2379 } else {
2380 hc = 1;
2381 dst[i++] = *src;
2382 lc = i;
2383 }
2384 src++;
2385 }
2386
2387 dst[lc] = '\0';
2388 }
2389
2390 /*
2391 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2392 */
2393 int
2394 iop_print_ident(struct iop_softc *sc, int tid)
2395 {
2396 struct {
2397 struct i2o_param_op_results pr;
2398 struct i2o_param_read_results prr;
2399 struct i2o_param_device_identity di;
2400 } __attribute__ ((__packed__)) p;
2401 char buf[32];
2402 int rv;
2403
2404 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2405 sizeof(p), NULL);
2406 if (rv != 0)
2407 return (rv);
2408
2409 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2410 sizeof(buf));
2411 printf(" <%s, ", buf);
2412 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2413 sizeof(buf));
2414 printf("%s, ", buf);
2415 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2416 printf("%s>", buf);
2417
2418 return (0);
2419 }
2420
2421 /*
2422 * Claim or unclaim the specified TID.
2423 */
2424 int
2425 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2426 int flags)
2427 {
2428 struct iop_msg *im;
2429 struct i2o_util_claim mf;
2430 int rv, func;
2431
2432 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2433 im = iop_msg_alloc(sc, IM_WAIT);
2434
2435 /* We can use the same structure, as they're identical. */
2436 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2437 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2438 mf.msgictx = ii->ii_ictx;
2439 mf.msgtctx = im->im_tctx;
2440 mf.flags = flags;
2441
2442 rv = iop_msg_post(sc, im, &mf, 5000);
2443 iop_msg_free(sc, im);
2444 return (rv);
2445 }
2446
2447 /*
2448 * Perform an abort.
2449 */
2450 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2451 int tctxabort, int flags)
2452 {
2453 struct iop_msg *im;
2454 struct i2o_util_abort mf;
2455 int rv;
2456
2457 im = iop_msg_alloc(sc, IM_WAIT);
2458
2459 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2460 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2461 mf.msgictx = ii->ii_ictx;
2462 mf.msgtctx = im->im_tctx;
2463 mf.flags = (func << 24) | flags;
2464 mf.tctxabort = tctxabort;
2465
2466 rv = iop_msg_post(sc, im, &mf, 5000);
2467 iop_msg_free(sc, im);
2468 return (rv);
2469 }
2470
2471 /*
2472 * Enable or disable reception of events for the specified device.
2473 */
2474 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2475 {
2476 struct i2o_util_event_register mf;
2477
2478 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2479 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2480 mf.msgictx = ii->ii_ictx;
2481 mf.msgtctx = 0;
2482 mf.eventmask = mask;
2483
2484 /* This message is replied to only when events are signalled. */
2485 return (iop_post(sc, (u_int32_t *)&mf));
2486 }
2487
2488 int
2489 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2490 {
2491 struct iop_softc *sc;
2492
2493 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2494 return (ENXIO);
2495 if ((sc->sc_flags & IOP_ONLINE) == 0)
2496 return (ENXIO);
2497 if ((sc->sc_flags & IOP_OPEN) != 0)
2498 return (EBUSY);
2499 sc->sc_flags |= IOP_OPEN;
2500
2501 return (0);
2502 }
2503
2504 int
2505 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2506 {
2507 struct iop_softc *sc;
2508
2509 sc = device_lookup(&iop_cd, minor(dev));
2510 sc->sc_flags &= ~IOP_OPEN;
2511
2512 return (0);
2513 }
2514
2515 int
2516 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2517 {
2518 struct iop_softc *sc;
2519 struct iovec *iov;
2520 int rv, i;
2521
2522 if (securelevel >= 2)
2523 return (EPERM);
2524
2525 sc = device_lookup(&iop_cd, minor(dev));
2526
2527 switch (cmd) {
2528 case IOPIOCPT:
2529 return (iop_passthrough(sc, (struct ioppt *)data, p));
2530
2531 case IOPIOCGSTATUS:
2532 iov = (struct iovec *)data;
2533 i = sizeof(struct i2o_status);
2534 if (i > iov->iov_len)
2535 i = iov->iov_len;
2536 else
2537 iov->iov_len = i;
2538 if ((rv = iop_status_get(sc, 0)) == 0)
2539 rv = copyout(&sc->sc_status, iov->iov_base, i);
2540 return (rv);
2541
2542 case IOPIOCGLCT:
2543 case IOPIOCGTIDMAP:
2544 case IOPIOCRECONFIG:
2545 break;
2546
2547 default:
2548 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2549 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2550 #endif
2551 return (ENOTTY);
2552 }
2553
2554 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2555 return (rv);
2556
2557 switch (cmd) {
2558 case IOPIOCGLCT:
2559 iov = (struct iovec *)data;
2560 i = le16toh(sc->sc_lct->tablesize) << 2;
2561 if (i > iov->iov_len)
2562 i = iov->iov_len;
2563 else
2564 iov->iov_len = i;
2565 rv = copyout(sc->sc_lct, iov->iov_base, i);
2566 break;
2567
2568 case IOPIOCRECONFIG:
2569 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2570 rv = iop_reconfigure(sc, 0);
2571 break;
2572
2573 case IOPIOCGTIDMAP:
2574 iov = (struct iovec *)data;
2575 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2576 if (i > iov->iov_len)
2577 i = iov->iov_len;
2578 else
2579 iov->iov_len = i;
2580 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2581 break;
2582 }
2583
2584 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2585 return (rv);
2586 }
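
/*
 * Editor's note: a userland sketch of the IOPIOCGSTATUS ioctl handled
 * above.  The device node name is assumed; struct i2o_status and the
 * ioctl number come from <dev/i2o/i2o.h> and <dev/i2o/iopio.h>.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>

#include <fcntl.h>
#include <unistd.h>

static int
get_iop_status(const char *dev, struct i2o_status *st)
{
	struct iovec iov;
	int fd, rv;

	if ((fd = open(dev, O_RDWR)) < 0)	/* e.g. "/dev/iop0" (assumed) */
		return (-1);

	/* The driver copies out at most iov_len bytes and updates iov_len. */
	iov.iov_base = st;
	iov.iov_len = sizeof(*st);
	rv = ioctl(fd, IOPIOCGSTATUS, &iov);

	close(fd);
	return (rv);
}
#endif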
2587
2588 static int
2589 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2590 {
2591 struct iop_msg *im;
2592 struct i2o_msg *mf;
2593 struct ioppt_buf *ptb;
2594 int rv, i, mapped;
2595
2596 mf = NULL;
2597 im = NULL;
2598 mapped = 0; /* nothing mapped yet */
2599
2600 if (pt->pt_msglen > sc->sc_framesize ||
2601 pt->pt_msglen < sizeof(struct i2o_msg) ||
2602 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2603 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2604 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2605 return (EINVAL);
2606
2607 for (i = 0; i < pt->pt_nbufs; i++)
2608 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2609 rv = ENOMEM;
2610 goto bad;
2611 }
2612
2613 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2614 if (mf == NULL)
2615 return (ENOMEM);
2616
2617 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2618 goto bad;
2619
2620 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2621 im->im_rb = (struct i2o_reply *)mf;
2622 mf->msgictx = IOP_ICTX;
2623 mf->msgtctx = im->im_tctx;
2624
2625 for (i = 0; i < pt->pt_nbufs; i++) {
2626 ptb = &pt->pt_bufs[i];
2627 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2628 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2629 if (rv != 0)
2630 goto bad;
2631 mapped = 1;
2632 }
2633
2634 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2635 goto bad;
2636
2637 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2638 if (i > sc->sc_framesize)
2639 i = sc->sc_framesize;
2640 if (i > pt->pt_replylen)
2641 i = pt->pt_replylen;
2642 rv = copyout(im->im_rb, pt->pt_reply, i);
2643
2644 bad:
2645 if (mapped != 0)
2646 iop_msg_unmap(sc, im);
2647 if (im != NULL)
2648 iop_msg_free(sc, im);
2649 if (mf != NULL)
2650 free(mf, M_DEVBUF);
2651 return (rv);
2652 }
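
/*
 * Editor's note: a userland sketch of the IOPIOCPT pass-through handled
 * above.  It reuses the headers and the descriptor opened in the previous
 * sketch (plus <string.h>); the function name and parameters are
 * hypothetical.  The caller supplies an I2O frame without an SGL; the
 * driver fills in the initiator and transaction contexts and maps any
 * pt_bufs[] entries itself.
 */
#if 0
static int
iop_pt(int fd, void *msg, size_t msglen, void *buf, size_t buflen, int out,
    void *reply, size_t replylen)
{
	struct ioppt pt;

	memset(&pt, 0, sizeof(pt));
	pt.pt_msg = msg;		/* I2O frame, header first */
	pt.pt_msglen = msglen;		/* >= sizeof(struct i2o_msg) */
	pt.pt_reply = reply;
	pt.pt_replylen = replylen;
	pt.pt_timo = 5000;		/* ms; the driver requires 1000..300000 */

	if (buf != NULL) {
		pt.pt_nbufs = 1;
		pt.pt_bufs[0].ptb_data = buf;
		pt.pt_bufs[0].ptb_datalen = buflen;
		pt.pt_bufs[0].ptb_out = out;	/* nonzero: data flows out to the IOP */
	}

	return (ioctl(fd, IOPIOCPT, &pt));
}
#endif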