sys/dev/i2o/iop.c
1 /* $NetBSD: iop.c,v 1.61.2.2 2008/05/25 18:53:43 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.61.2.2 2008/05/25 18:53:43 bouyer Exp $");
45
46 #include "opt_i2o.h"
47 #include "iop.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/queue.h>
54 #include <sys/proc.h>
55 #include <sys/malloc.h>
56 #include <sys/ioctl.h>
57 #include <sys/endian.h>
58 #include <sys/conf.h>
59 #include <sys/kthread.h>
60 #include <sys/kauth.h>
61
62 #include <uvm/uvm_extern.h>
63
64 #include <machine/bus.h>
65
66 #include <dev/i2o/i2o.h>
67 #include <dev/i2o/iopio.h>
68 #include <dev/i2o/iopreg.h>
69 #include <dev/i2o/iopvar.h>
70
71 #include "locators.h"
72
73 #define POLL(ms, cond) \
74 do { \
75 int xi; \
76 for (xi = (ms) * 10; xi; xi--) { \
77 if (cond) \
78 break; \
79 DELAY(100); \
80 } \
     81 	} while (/* CONSTCOND */0)
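/*
 * POLL() busy-waits for up to `ms' milliseconds, testing `cond' every
 * 100us.  `cond' may be a comma expression with side effects (e.g. a
 * bus_dmamap_sync() before the test), as in iop_ofifo_init() and
 * iop_reset().  Call sites supply their own trailing semicolon.
 */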
82
83 #ifdef I2ODEBUG
84 #define DPRINTF(x) printf x
85 #else
86 #define DPRINTF(x)
87 #endif
88
89 #ifdef I2OVERBOSE
90 #define IFVERBOSE(x) x
91 #define COMMENT(x) NULL
92 #else
93 #define IFVERBOSE(x)
94 #define COMMENT(x)
95 #endif
96
97 #define IOP_ICTXHASH_NBUCKETS 16
98 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
99
100 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
101
102 #define IOP_TCTX_SHIFT 12
103 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
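/*
 * A transaction context (tctx) carries the message wrapper's index in
 * its low IOP_TCTX_SHIFT bits and a per-allocation generation number in
 * the remaining bits (see iop_msg_alloc()), which lets iop_handle_reply()
 * catch stale or corrupt replies.  Initiator contexts (ictx) are hashed
 * into iop_ictxhashtbl for reply dispatch.
 */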
104
105 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
106 static u_long iop_ictxhash;
107 static void *iop_sdh;
108 static struct i2o_systab *iop_systab;
109 static int iop_systab_size;
110
111 extern struct cfdriver iop_cd;
112
113 dev_type_open(iopopen);
114 dev_type_close(iopclose);
115 dev_type_ioctl(iopioctl);
116
117 const struct cdevsw iop_cdevsw = {
118 iopopen, iopclose, noread, nowrite, iopioctl,
119 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
120 };
121
122 #define IC_CONFIGURE 0x01
123 #define IC_PRIORITY 0x02
124
125 static struct iop_class {
126 u_short ic_class;
127 u_short ic_flags;
128 #ifdef I2OVERBOSE
129 const char *ic_caption;
130 #endif
131 } const iop_class[] = {
132 {
133 I2O_CLASS_EXECUTIVE,
134 0,
135 IFVERBOSE("executive")
136 },
137 {
138 I2O_CLASS_DDM,
139 0,
140 COMMENT("device driver module")
141 },
142 {
143 I2O_CLASS_RANDOM_BLOCK_STORAGE,
144 IC_CONFIGURE | IC_PRIORITY,
145 IFVERBOSE("random block storage")
146 },
147 {
148 I2O_CLASS_SEQUENTIAL_STORAGE,
149 IC_CONFIGURE | IC_PRIORITY,
150 IFVERBOSE("sequential storage")
151 },
152 {
153 I2O_CLASS_LAN,
154 IC_CONFIGURE | IC_PRIORITY,
155 IFVERBOSE("LAN port")
156 },
157 {
158 I2O_CLASS_WAN,
159 IC_CONFIGURE | IC_PRIORITY,
160 IFVERBOSE("WAN port")
161 },
162 {
163 I2O_CLASS_FIBRE_CHANNEL_PORT,
164 IC_CONFIGURE,
165 IFVERBOSE("fibrechannel port")
166 },
167 {
168 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
169 0,
170 COMMENT("fibrechannel peripheral")
171 },
172 {
173 I2O_CLASS_SCSI_PERIPHERAL,
174 0,
175 COMMENT("SCSI peripheral")
176 },
177 {
178 I2O_CLASS_ATE_PORT,
179 IC_CONFIGURE,
180 IFVERBOSE("ATE port")
181 },
182 {
183 I2O_CLASS_ATE_PERIPHERAL,
184 0,
185 COMMENT("ATE peripheral")
186 },
187 {
188 I2O_CLASS_FLOPPY_CONTROLLER,
189 IC_CONFIGURE,
190 IFVERBOSE("floppy controller")
191 },
192 {
193 I2O_CLASS_FLOPPY_DEVICE,
194 0,
195 COMMENT("floppy device")
196 },
197 {
198 I2O_CLASS_BUS_ADAPTER_PORT,
199 IC_CONFIGURE,
    200 		IFVERBOSE("bus adapter port")
201 },
202 };
203
204 #if defined(I2ODEBUG) && defined(I2OVERBOSE)
205 static const char * const iop_status[] = {
206 "success",
207 "abort (dirty)",
208 "abort (no data transfer)",
209 "abort (partial transfer)",
210 "error (dirty)",
211 "error (no data transfer)",
212 "error (partial transfer)",
213 "undefined error code",
214 "process abort (dirty)",
215 "process abort (no data transfer)",
216 "process abort (partial transfer)",
217 "transaction error",
218 };
219 #endif
220
221 static inline u_int32_t iop_inl(struct iop_softc *, int);
222 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
223
224 static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
225 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);
226
227 static void iop_config_interrupts(struct device *);
228 static void iop_configure_devices(struct iop_softc *, int, int);
229 static void iop_devinfo(int, char *, size_t);
230 static int iop_print(void *, const char *);
231 static void iop_shutdown(void *);
232
233 static void iop_adjqparam(struct iop_softc *, int);
234 static void iop_create_reconf_thread(void *);
235 static int iop_handle_reply(struct iop_softc *, u_int32_t);
236 static int iop_hrt_get(struct iop_softc *);
237 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
238 static void iop_intr_event(struct device *, struct iop_msg *, void *);
239 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
240 u_int32_t);
241 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
242 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
243 static int iop_ofifo_init(struct iop_softc *);
244 static int iop_passthrough(struct iop_softc *, struct ioppt *,
245 struct proc *);
246 static void iop_reconf_thread(void *);
247 static void iop_release_mfa(struct iop_softc *, u_int32_t);
248 static int iop_reset(struct iop_softc *);
249 static int iop_sys_enable(struct iop_softc *);
250 static int iop_systab_set(struct iop_softc *);
251 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
252
253 #ifdef I2ODEBUG
254 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
255 #endif
256
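/*
 * Register access helpers: reads are preceded by a full read/write
 * barrier and writes are followed by a write barrier, so FIFO register
 * accesses stay ordered with respect to message frame contents.  The
 * _msg variants address the message frame window (sc_msg_iot/sc_msg_ioh)
 * rather than the register window.
 */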
257 static inline u_int32_t
258 iop_inl(struct iop_softc *sc, int off)
259 {
260
261 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
262 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
263 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
264 }
265
266 static inline void
267 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
268 {
269
270 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
271 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
272 BUS_SPACE_BARRIER_WRITE);
273 }
274
275 static inline u_int32_t
276 iop_inl_msg(struct iop_softc *sc, int off)
277 {
278
279 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
280 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
281 return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
282 }
283
284 static inline void
285 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
286 {
287
288 bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
289 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
290 BUS_SPACE_BARRIER_WRITE);
291 }
292
293 /*
294 * Initialise the IOP and our interface.
295 */
296 void
297 iop_init(struct iop_softc *sc, const char *intrstr)
298 {
299 struct iop_msg *im;
300 int rv, i, j, state, nsegs;
301 u_int32_t mask;
302 char ident[64];
303
304 state = 0;
305
306 printf("I2O adapter");
307
308 if (iop_ictxhashtbl == NULL)
309 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
310 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
311
312 /* Disable interrupts at the IOP. */
313 mask = iop_inl(sc, IOP_REG_INTR_MASK);
314 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
315
316 /* Allocate a scratch DMA map for small miscellaneous shared data. */
317 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
318 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
319 printf("%s: cannot create scratch dmamap\n",
320 sc->sc_dv.dv_xname);
321 return;
322 }
323
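	/*
	 * From here on, `state' records how many of the setup steps
	 * below have completed, so the bail_out path can unwind exactly
	 * what was done.
	 */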
324 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
325 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
326 printf("%s: cannot alloc scratch dmamem\n",
327 sc->sc_dv.dv_xname);
328 goto bail_out;
329 }
330 state++;
331
332 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
333 &sc->sc_scr, 0)) {
334 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
335 goto bail_out;
336 }
337 state++;
338
339 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
340 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
341 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
342 goto bail_out;
343 }
344 state++;
345
346 #ifdef I2ODEBUG
347 /* So that our debug checks don't choke. */
348 sc->sc_framesize = 128;
349 #endif
350
351 /* Avoid syncing the reply map until it's set up. */
352 sc->sc_curib = 0x123;
353
354 /* Reset the adapter and request status. */
355 if ((rv = iop_reset(sc)) != 0) {
356 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
357 goto bail_out;
358 }
359
360 if ((rv = iop_status_get(sc, 1)) != 0) {
361 printf("%s: not responding (get status)\n",
362 sc->sc_dv.dv_xname);
363 goto bail_out;
364 }
365
366 sc->sc_flags |= IOP_HAVESTATUS;
367 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
368 ident, sizeof(ident));
369 printf(" <%s>\n", ident);
370
371 #ifdef I2ODEBUG
372 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
373 le16toh(sc->sc_status.orgid),
374 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
375 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
376 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
377 le32toh(sc->sc_status.desiredprivmemsize),
378 le32toh(sc->sc_status.currentprivmemsize),
379 le32toh(sc->sc_status.currentprivmembase));
380 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
381 le32toh(sc->sc_status.desiredpriviosize),
382 le32toh(sc->sc_status.currentpriviosize),
383 le32toh(sc->sc_status.currentpriviobase));
384 #endif
385
386 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
387 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
388 sc->sc_maxob = IOP_MAX_OUTBOUND;
389 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
390 if (sc->sc_maxib > IOP_MAX_INBOUND)
391 sc->sc_maxib = IOP_MAX_INBOUND;
392 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
393 if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
394 sc->sc_framesize = IOP_MAX_MSG_SIZE;
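	/*
	 * inboundmframesize is expressed in 32-bit words, hence the
	 * shift left by 2 above to convert it to bytes.
	 */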
395
396 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
397 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
398 printf("%s: frame size too small (%d)\n",
399 sc->sc_dv.dv_xname, sc->sc_framesize);
400 goto bail_out;
401 }
402 #endif
403
404 /* Allocate message wrappers. */
405 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
406 if (im == NULL) {
407 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
408 goto bail_out;
409 }
410 state++;
411 sc->sc_ims = im;
412 SLIST_INIT(&sc->sc_im_freelist);
413
414 for (i = 0; i < sc->sc_maxib; i++, im++) {
415 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
416 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
417 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
418 &im->im_xfer[0].ix_map);
419 if (rv != 0) {
    420 			printf("%s: couldn't create dmamap (%d)\n",
421 sc->sc_dv.dv_xname, rv);
422 goto bail_out3;
423 }
424
425 im->im_tctx = i;
426 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
427 }
428
429 /* Initialise the IOP's outbound FIFO. */
430 if (iop_ofifo_init(sc) != 0) {
    431 		printf("%s: unable to init outbound FIFO\n",
432 sc->sc_dv.dv_xname);
433 goto bail_out3;
434 }
435
436 /*
437 * Defer further configuration until (a) interrupts are working and
438 * (b) we have enough information to build the system table.
439 */
440 config_interrupts((struct device *)sc, iop_config_interrupts);
441
442 /* Configure shutdown hook before we start any device activity. */
443 if (iop_sdh == NULL)
444 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
445
446 /* Ensure interrupts are enabled at the IOP. */
447 mask = iop_inl(sc, IOP_REG_INTR_MASK);
448 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
449
450 if (intrstr != NULL)
451 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
452 intrstr);
453
454 #ifdef I2ODEBUG
455 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
456 sc->sc_dv.dv_xname, sc->sc_maxib,
457 le32toh(sc->sc_status.maxinboundmframes),
458 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
459 #endif
460
461 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
462 return;
463
464 bail_out3:
465 if (state > 3) {
466 for (j = 0; j < i; j++)
467 bus_dmamap_destroy(sc->sc_dmat,
468 sc->sc_ims[j].im_xfer[0].ix_map);
469 free(sc->sc_ims, M_DEVBUF);
470 }
471 bail_out:
472 if (state > 2)
473 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
474 if (state > 1)
475 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
476 if (state > 0)
477 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
478 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
479 }
480
481 /*
482 * Perform autoconfiguration tasks.
483 */
484 static void
485 iop_config_interrupts(struct device *self)
486 {
487 struct iop_attach_args ia;
488 struct iop_softc *sc, *iop;
489 struct i2o_systab_entry *ste;
490 int rv, i, niop;
491 int locs[IOPCF_NLOCS];
492
493 sc = device_private(self);
494 LIST_INIT(&sc->sc_iilist);
495
496 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
497
498 if (iop_hrt_get(sc) != 0) {
499 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
500 return;
501 }
502
503 /*
504 * Build the system table.
505 */
506 if (iop_systab == NULL) {
507 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
508 if ((iop = device_lookup(&iop_cd, i)) == NULL)
509 continue;
510 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
511 continue;
512 if (iop_status_get(iop, 1) != 0) {
513 printf("%s: unable to retrieve status\n",
514 sc->sc_dv.dv_xname);
515 iop->sc_flags &= ~IOP_HAVESTATUS;
516 continue;
517 }
518 niop++;
519 }
520 if (niop == 0)
521 return;
522
523 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
524 sizeof(struct i2o_systab);
525 iop_systab_size = i;
    526 		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL)
			return;
527
528 iop_systab->numentries = niop;
529 iop_systab->version = I2O_VERSION_11;
530
531 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
532 if ((iop = device_lookup(&iop_cd, i)) == NULL)
533 continue;
534 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
535 continue;
536
537 ste->orgid = iop->sc_status.orgid;
538 ste->iopid = device_unit(&iop->sc_dv) + 2;
539 ste->segnumber =
540 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
541 ste->iopcaps = iop->sc_status.iopcaps;
542 ste->inboundmsgframesize =
543 iop->sc_status.inboundmframesize;
544 ste->inboundmsgportaddresslow =
545 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
546 ste++;
547 }
548 }
549
550 /*
551 * Post the system table to the IOP and bring it to the OPERATIONAL
552 * state.
553 */
554 if (iop_systab_set(sc) != 0) {
555 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
556 return;
557 }
558 if (iop_sys_enable(sc) != 0) {
559 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
560 return;
561 }
562
563 /*
564 * Set up an event handler for this IOP.
565 */
566 sc->sc_eventii.ii_dv = self;
567 sc->sc_eventii.ii_intr = iop_intr_event;
568 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
569 sc->sc_eventii.ii_tid = I2O_TID_IOP;
570 iop_initiator_register(sc, &sc->sc_eventii);
571
572 rv = iop_util_eventreg(sc, &sc->sc_eventii,
573 I2O_EVENT_EXEC_RESOURCE_LIMITS |
574 I2O_EVENT_EXEC_CONNECTION_FAIL |
575 I2O_EVENT_EXEC_ADAPTER_FAULT |
576 I2O_EVENT_EXEC_POWER_FAIL |
577 I2O_EVENT_EXEC_RESET_PENDING |
578 I2O_EVENT_EXEC_RESET_IMMINENT |
579 I2O_EVENT_EXEC_HARDWARE_FAIL |
580 I2O_EVENT_EXEC_XCT_CHANGE |
581 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
582 I2O_EVENT_GEN_DEVICE_RESET |
583 I2O_EVENT_GEN_STATE_CHANGE |
584 I2O_EVENT_GEN_GENERAL_WARNING);
585 if (rv != 0) {
    586 		printf("%s: unable to register for events\n",
			sc->sc_dv.dv_xname);
587 return;
588 }
589
590 /*
591 * Attempt to match and attach a product-specific extension.
592 */
593 ia.ia_class = I2O_CLASS_ANY;
594 ia.ia_tid = I2O_TID_IOP;
595 locs[IOPCF_TID] = I2O_TID_IOP;
596 config_found_sm_loc(self, "iop", locs, &ia, iop_print,
597 config_stdsubmatch);
598
599 /*
600 * Start device configuration.
601 */
602 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
603 if ((rv = iop_reconfigure(sc, 0)) == -1) {
    604 		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
    605 		return;
606 }
607 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
608
609 kthread_create(iop_create_reconf_thread, sc);
610 }
611
612 /*
613 * Create the reconfiguration thread. Called after the standard kernel
614 * threads have been created.
615 */
616 static void
617 iop_create_reconf_thread(void *cookie)
618 {
619 struct iop_softc *sc;
620 int rv;
621
622 sc = cookie;
623 sc->sc_flags |= IOP_ONLINE;
624
625 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
626 "%s", sc->sc_dv.dv_xname);
627 if (rv != 0) {
    628 		printf("%s: unable to create reconfiguration thread (%d)\n",
629 sc->sc_dv.dv_xname, rv);
630 return;
631 }
632 }
633
634 /*
635 * Reconfiguration thread; listens for LCT change notification, and
636 * initiates re-configuration if received.
637 */
638 static void
639 iop_reconf_thread(void *cookie)
640 {
641 struct iop_softc *sc;
642 struct lwp *l;
643 struct i2o_lct lct;
644 u_int32_t chgind;
645 int rv;
646
647 sc = cookie;
648 chgind = sc->sc_chgind + 1;
649 l = curlwp;
650
651 for (;;) {
652 DPRINTF(("%s: async reconfig: requested 0x%08x\n",
653 sc->sc_dv.dv_xname, chgind));
654
655 PHOLD(l);
656 rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
657 PRELE(l);
658
659 DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
660 sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));
661
662 if (rv == 0 &&
663 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
664 iop_reconfigure(sc, le32toh(lct.changeindicator));
665 chgind = sc->sc_chgind + 1;
666 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
667 }
668
669 tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
670 }
671 }
672
673 /*
674 * Reconfigure: find new and removed devices.
675 */
676 int
677 iop_reconfigure(struct iop_softc *sc, u_int chgind)
678 {
679 struct iop_msg *im;
680 struct i2o_hba_bus_scan mf;
681 struct i2o_lct_entry *le;
682 struct iop_initiator *ii, *nextii;
683 int rv, tid, i;
684
685 /*
686 * If the reconfiguration request isn't the result of LCT change
687 * notification, then be more thorough: ask all bus ports to scan
688 * their busses. Wait up to 5 minutes for each bus port to complete
689 * the request.
690 */
691 if (chgind == 0) {
692 if ((rv = iop_lct_get(sc)) != 0) {
693 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
694 return (rv);
695 }
696
697 le = sc->sc_lct->entry;
698 for (i = 0; i < sc->sc_nlctent; i++, le++) {
699 if ((le16toh(le->classid) & 4095) !=
700 I2O_CLASS_BUS_ADAPTER_PORT)
701 continue;
702 tid = le16toh(le->localtid) & 4095;
703
704 im = iop_msg_alloc(sc, IM_WAIT);
705
706 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
707 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
708 mf.msgictx = IOP_ICTX;
709 mf.msgtctx = im->im_tctx;
710
711 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
712 tid));
713
714 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
715 iop_msg_free(sc, im);
716 #ifdef I2ODEBUG
717 if (rv != 0)
718 printf("%s: bus scan failed\n",
719 sc->sc_dv.dv_xname);
720 #endif
721 }
722 } else if (chgind <= sc->sc_chgind) {
723 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
724 return (0);
725 }
726
727 /* Re-read the LCT and determine if it has changed. */
728 if ((rv = iop_lct_get(sc)) != 0) {
729 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
730 return (rv);
731 }
732 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
733
734 chgind = le32toh(sc->sc_lct->changeindicator);
735 if (chgind == sc->sc_chgind) {
736 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
737 return (0);
738 }
739 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
740 sc->sc_chgind = chgind;
741
742 if (sc->sc_tidmap != NULL)
743 free(sc->sc_tidmap, M_DEVBUF);
    744 	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
    745 	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
746
747 /* Allow 1 queued command per device while we're configuring. */
748 iop_adjqparam(sc, 1);
749
750 /*
751 * Match and attach child devices. We configure high-level devices
752 * first so that any claims will propagate throughout the LCT,
753 * hopefully masking off aliased devices as a result.
754 *
755 * Re-reading the LCT at this point is a little dangerous, but we'll
756 * trust the IOP (and the operator) to behave itself...
757 */
758 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
759 IC_CONFIGURE | IC_PRIORITY);
760 if ((rv = iop_lct_get(sc)) != 0) {
761 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
762 }
763 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
764 IC_CONFIGURE);
765
766 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
767 nextii = LIST_NEXT(ii, ii_list);
768
769 /* Detach devices that were configured, but are now gone. */
770 for (i = 0; i < sc->sc_nlctent; i++)
771 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
772 break;
773 if (i == sc->sc_nlctent ||
774 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
775 config_detach(ii->ii_dv, DETACH_FORCE);
776 continue;
777 }
778
779 /*
780 * Tell initiators that existed before the re-configuration
781 * to re-configure.
782 */
783 if (ii->ii_reconfig == NULL)
784 continue;
785 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
786 printf("%s: %s failed reconfigure (%d)\n",
787 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
788 }
789
790 /* Re-adjust queue parameters and return. */
791 if (sc->sc_nii != 0)
792 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
793 / sc->sc_nii);
794
795 return (0);
796 }
797
798 /*
799 * Configure I2O devices into the system.
800 */
801 static void
802 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
803 {
804 struct iop_attach_args ia;
805 struct iop_initiator *ii;
806 const struct i2o_lct_entry *le;
807 struct device *dv;
808 int i, j, nent;
809 u_int usertid;
810 int locs[IOPCF_NLOCS];
811
812 nent = sc->sc_nlctent;
813 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
814 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
815
816 /* Ignore the device if it's in use. */
817 usertid = le32toh(le->usertid) & 4095;
818 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
819 continue;
820
821 ia.ia_class = le16toh(le->classid) & 4095;
822 ia.ia_tid = sc->sc_tidmap[i].it_tid;
823
824 /* Ignore uninteresting devices. */
825 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
826 if (iop_class[j].ic_class == ia.ia_class)
827 break;
828 if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
829 (iop_class[j].ic_flags & mask) != maskval)
830 continue;
831
832 /*
833 * Try to configure the device only if it's not already
834 * configured.
835 */
836 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
837 if (ia.ia_tid == ii->ii_tid) {
838 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
839 strcpy(sc->sc_tidmap[i].it_dvname,
840 ii->ii_dv->dv_xname);
841 break;
842 }
843 }
844 if (ii != NULL)
845 continue;
846
847 locs[IOPCF_TID] = ia.ia_tid;
848
849 dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
850 iop_print, config_stdsubmatch);
851 if (dv != NULL) {
852 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
853 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
854 }
855 }
856 }
857
858 /*
859 * Adjust queue parameters for all child devices.
860 */
861 static void
862 iop_adjqparam(struct iop_softc *sc, int mpi)
863 {
864 struct iop_initiator *ii;
865
866 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
867 if (ii->ii_adjqparam != NULL)
868 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
869 }
870
871 static void
872 iop_devinfo(int class, char *devinfo, size_t l)
873 {
874 #ifdef I2OVERBOSE
875 int i;
876
877 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
878 if (class == iop_class[i].ic_class)
879 break;
880
881 if (i == sizeof(iop_class) / sizeof(iop_class[0]))
882 snprintf(devinfo, l, "device (class 0x%x)", class);
883 else
884 strlcpy(devinfo, iop_class[i].ic_caption, l);
885 #else
886
887 snprintf(devinfo, l, "device (class 0x%x)", class);
888 #endif
889 }
890
891 static int
892 iop_print(void *aux, const char *pnp)
893 {
894 struct iop_attach_args *ia;
895 char devinfo[256];
896
897 ia = aux;
898
899 if (pnp != NULL) {
900 iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
901 aprint_normal("%s at %s", devinfo, pnp);
902 }
903 aprint_normal(" tid %d", ia->ia_tid);
904 return (UNCONF);
905 }
906
907 /*
908 * Shut down all configured IOPs.
909 */
910 static void
911 iop_shutdown(void *junk)
912 {
913 struct iop_softc *sc;
914 int i;
915
916 printf("shutting down iop devices...");
917
918 for (i = 0; i < iop_cd.cd_ndevs; i++) {
919 if ((sc = device_lookup(&iop_cd, i)) == NULL)
920 continue;
921 if ((sc->sc_flags & IOP_ONLINE) == 0)
922 continue;
923
924 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
925 0, 5000);
926
927 if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
928 /*
929 * Some AMI firmware revisions will go to sleep and
930 * never come back after this.
931 */
932 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
933 IOP_ICTX, 0, 1000);
934 }
935 }
936
937 /* Wait. Some boards could still be flushing, stupidly enough. */
938 delay(5000*1000);
939 printf(" done\n");
940 }
941
942 /*
943 * Retrieve IOP status.
944 */
945 int
946 iop_status_get(struct iop_softc *sc, int nosleep)
947 {
948 struct i2o_exec_status_get mf;
949 struct i2o_status *st;
950 paddr_t pa;
951 int rv, i;
952
953 pa = sc->sc_scr_seg->ds_addr;
954 st = (struct i2o_status *)sc->sc_scr;
955
956 mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
957 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
958 mf.reserved[0] = 0;
959 mf.reserved[1] = 0;
960 mf.reserved[2] = 0;
961 mf.reserved[3] = 0;
962 mf.addrlow = (u_int32_t)pa;
963 mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
964 mf.length = sizeof(sc->sc_status);
965
966 memset(st, 0, sizeof(*st));
967 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
968 BUS_DMASYNC_PREREAD);
969
970 if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
971 return (rv);
972
973 for (i = 25; i != 0; i--) {
974 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
975 sizeof(*st), BUS_DMASYNC_POSTREAD);
976 if (st->syncbyte == 0xff)
977 break;
978 if (nosleep)
979 DELAY(100*1000);
980 else
981 tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
982 }
983
984 if (st->syncbyte != 0xff) {
985 printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
986 rv = EIO;
987 } else {
988 memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
989 rv = 0;
990 }
991
992 return (rv);
993 }
994
995 /*
996 * Initialize and populate the IOP's outbound FIFO.
997 */
998 static int
999 iop_ofifo_init(struct iop_softc *sc)
1000 {
1001 bus_addr_t addr;
1002 bus_dma_segment_t seg;
1003 struct i2o_exec_outbound_init *mf;
1004 int i, rseg, rv;
1005 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
1006
1007 sw = (u_int32_t *)sc->sc_scr;
1008
1009 mf = (struct i2o_exec_outbound_init *)mb;
1010 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
1011 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
1012 mf->msgictx = IOP_ICTX;
1013 mf->msgtctx = 0;
1014 mf->pagesize = PAGE_SIZE;
1015 mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
1016
1017 /*
1018 * The I2O spec says that there are two SGLs: one for the status
1019 * word, and one for a list of discarded MFAs. It continues to say
1020 * that if you don't want to get the list of MFAs, an IGNORE SGL is
1021 * necessary; this isn't the case (and is in fact a bad thing).
1022 */
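	/*
	 * Append a single SIMPLE SG element for the status word.  Each
	 * SIMPLE element is two words (flags|length, then the physical
	 * address), and per the header layout used by I2O_MSGFLAGS(),
	 * bits 16-31 of the first message word hold the message size in
	 * 32-bit words, hence the "2 << 16" below.
	 */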
1023 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
1024 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
1025 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
1026 (u_int32_t)sc->sc_scr_seg->ds_addr;
1027 mb[0] += 2 << 16;
1028
1029 *sw = 0;
1030 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1031 BUS_DMASYNC_PREREAD);
1032
1033 if ((rv = iop_post(sc, mb)) != 0)
1034 return (rv);
1035
1036 POLL(5000,
1037 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1038 BUS_DMASYNC_POSTREAD),
1039 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1040
1041 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1042 printf("%s: outbound FIFO init failed (%d)\n",
1043 sc->sc_dv.dv_xname, le32toh(*sw));
1044 return (EIO);
1045 }
1046
1047 /* Allocate DMA safe memory for the reply frames. */
1048 if (sc->sc_rep_phys == 0) {
1049 sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1050
1051 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1052 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1053 if (rv != 0) {
1054 printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
1055 rv);
1056 return (rv);
1057 }
1058
1059 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1060 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1061 if (rv != 0) {
1062 printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
1063 return (rv);
1064 }
1065
1066 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1067 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1068 if (rv != 0) {
1069 printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
1070 rv);
1071 return (rv);
1072 }
1073
1074 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1075 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1076 if (rv != 0) {
1077 printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
1078 return (rv);
1079 }
1080
1081 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1082
1083 /* Now safe to sync the reply map. */
1084 sc->sc_curib = 0;
1085 }
1086
1087 /* Populate the outbound FIFO. */
1088 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1089 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1090 addr += sc->sc_framesize;
1091 }
1092
1093 return (0);
1094 }
1095
1096 /*
1097 * Read the specified number of bytes from the IOP's hardware resource table.
1098 */
1099 static int
1100 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1101 {
1102 struct iop_msg *im;
1103 int rv;
1104 struct i2o_exec_hrt_get *mf;
1105 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1106
1107 im = iop_msg_alloc(sc, IM_WAIT);
1108 mf = (struct i2o_exec_hrt_get *)mb;
1109 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1110 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1111 mf->msgictx = IOP_ICTX;
1112 mf->msgtctx = im->im_tctx;
1113
1114 iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1115 rv = iop_msg_post(sc, im, mb, 30000);
1116 iop_msg_unmap(sc, im);
1117 iop_msg_free(sc, im);
1118 return (rv);
1119 }
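/*
 * Note the canonical message life cycle above: iop_msg_alloc(), build
 * the frame, iop_msg_map(), iop_msg_post(), then iop_msg_unmap() and
 * finally iop_msg_free().
 */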
1120
1121 /*
1122 * Read the IOP's hardware resource table.
1123 */
1124 static int
1125 iop_hrt_get(struct iop_softc *sc)
1126 {
1127 struct i2o_hrt hrthdr, *hrt;
1128 int size, rv;
1129
1130 PHOLD(curlwp);
1131 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1132 PRELE(curlwp);
1133 if (rv != 0)
1134 return (rv);
1135
1136 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1137 le16toh(hrthdr.numentries)));
1138
1139 size = sizeof(struct i2o_hrt) +
1140 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
   1141 	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);
1142
1143 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1144 free(hrt, M_DEVBUF);
1145 return (rv);
1146 }
1147
1148 if (sc->sc_hrt != NULL)
1149 free(sc->sc_hrt, M_DEVBUF);
1150 sc->sc_hrt = hrt;
1151 return (0);
1152 }
1153
1154 /*
1155 * Request the specified number of bytes from the IOP's logical
1156 * configuration table. If a change indicator is specified, this
1157 * is a verbatim notification request, so the caller is prepared
1158 * to wait indefinitely.
1159 */
1160 static int
1161 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1162 u_int32_t chgind)
1163 {
1164 struct iop_msg *im;
1165 struct i2o_exec_lct_notify *mf;
1166 int rv;
1167 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1168
1169 im = iop_msg_alloc(sc, IM_WAIT);
1170 memset(lct, 0, size);
1171
1172 mf = (struct i2o_exec_lct_notify *)mb;
1173 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1174 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1175 mf->msgictx = IOP_ICTX;
1176 mf->msgtctx = im->im_tctx;
1177 mf->classid = I2O_CLASS_ANY;
1178 mf->changeindicator = chgind;
1179
1180 #ifdef I2ODEBUG
1181 printf("iop_lct_get0: reading LCT");
1182 if (chgind != 0)
1183 printf(" (async)");
1184 printf("\n");
1185 #endif
1186
1187 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1188 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1189 iop_msg_unmap(sc, im);
1190 iop_msg_free(sc, im);
1191 return (rv);
1192 }
1193
1194 /*
1195 * Read the IOP's logical configuration table.
1196 */
1197 int
1198 iop_lct_get(struct iop_softc *sc)
1199 {
1200 int esize, size, rv;
1201 struct i2o_lct *lct;
1202
1203 esize = le32toh(sc->sc_status.expectedlctsize);
1204 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1205 if (lct == NULL)
1206 return (ENOMEM);
1207
1208 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1209 free(lct, M_DEVBUF);
1210 return (rv);
1211 }
1212
1213 size = le16toh(lct->tablesize) << 2;
1214 if (esize != size) {
1215 free(lct, M_DEVBUF);
1216 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1217 if (lct == NULL)
1218 return (ENOMEM);
1219
1220 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1221 free(lct, M_DEVBUF);
1222 return (rv);
1223 }
1224 }
1225
1226 /* Swap in the new LCT. */
1227 if (sc->sc_lct != NULL)
1228 free(sc->sc_lct, M_DEVBUF);
1229 sc->sc_lct = lct;
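	/*
	 * struct i2o_lct embeds one entry in its header (compare the
	 * "numentries - 1" sizing in iop_hrt_get()), so subtract the
	 * header and add one entry size back before dividing.
	 */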
1230 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1231 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1232 sizeof(struct i2o_lct_entry);
1233 return (0);
1234 }
1235
1236 /*
1237 * Post a SYS_ENABLE message to the adapter.
1238 */
1239 int
1240 iop_sys_enable(struct iop_softc *sc)
1241 {
1242 struct iop_msg *im;
1243 struct i2o_msg mf;
1244 int rv;
1245
1246 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1247
1248 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1249 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1250 mf.msgictx = IOP_ICTX;
1251 mf.msgtctx = im->im_tctx;
1252
1253 rv = iop_msg_post(sc, im, &mf, 30000);
1254 if (rv == 0) {
1255 if ((im->im_flags & IM_FAIL) != 0)
1256 rv = ENXIO;
1257 else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1258 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1259 im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1260 rv = 0;
1261 else
1262 rv = EIO;
1263 }
1264
1265 iop_msg_free(sc, im);
1266 return (rv);
1267 }
1268
1269 /*
1270 * Request the specified parameter group from the target. If an initiator
1271 * is specified (a) don't wait for the operation to complete, but instead
1272 * let the initiator's interrupt handler deal with the reply and (b) place a
1273 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1274 */
1275 int
1276 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1277 int size, struct iop_initiator *ii)
1278 {
1279 struct iop_msg *im;
1280 struct i2o_util_params_op *mf;
1281 int rv;
1282 struct iop_pgop *pgop;
1283 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1284
1285 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1286 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1287 iop_msg_free(sc, im);
1288 return (ENOMEM);
1289 }
1290 im->im_dvcontext = pgop;
1291
1292 mf = (struct i2o_util_params_op *)mb;
1293 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1294 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1295 mf->msgictx = IOP_ICTX;
1296 mf->msgtctx = im->im_tctx;
1297 mf->flags = 0;
1298
1299 pgop->olh.count = htole16(1);
1300 pgop->olh.reserved = htole16(0);
1301 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1302 pgop->oat.fieldcount = htole16(0xffff);
1303 pgop->oat.group = htole16(group);
1304
1305 if (ii == NULL)
1306 PHOLD(curlwp);
1307
1308 memset(buf, 0, size);
1309 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1310 iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1311 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1312
1313 if (ii == NULL)
1314 PRELE(curlwp);
1315
   1316 	/* Detect errors; let partial transfers count as success. */
1317 if (ii == NULL && rv == 0) {
1318 if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1319 im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
1320 rv = 0;
1321 else
1322 rv = (im->im_reqstatus != 0 ? EIO : 0);
1323
1324 if (rv != 0)
1325 printf("%s: FIELD_GET failed for tid %d group %d\n",
1326 sc->sc_dv.dv_xname, tid, group);
1327 }
1328
1329 if (ii == NULL || rv != 0) {
1330 iop_msg_unmap(sc, im);
1331 iop_msg_free(sc, im);
1332 free(pgop, M_DEVBUF);
1333 }
1334
1335 return (rv);
1336 }
1337
1338 /*
1339 * Set a single field in a scalar parameter group.
1340 */
1341 int
1342 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1343 int size, int field)
1344 {
1345 struct iop_msg *im;
1346 struct i2o_util_params_op *mf;
1347 struct iop_pgop *pgop;
1348 int rv, totsize;
1349 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1350
1351 totsize = sizeof(*pgop) + size;
1352
1353 im = iop_msg_alloc(sc, IM_WAIT);
1354 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1355 iop_msg_free(sc, im);
1356 return (ENOMEM);
1357 }
1358
1359 mf = (struct i2o_util_params_op *)mb;
1360 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1361 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1362 mf->msgictx = IOP_ICTX;
1363 mf->msgtctx = im->im_tctx;
1364 mf->flags = 0;
1365
1366 pgop->olh.count = htole16(1);
1367 pgop->olh.reserved = htole16(0);
1368 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1369 pgop->oat.fieldcount = htole16(1);
1370 pgop->oat.group = htole16(group);
1371 pgop->oat.fields[0] = htole16(field);
1372 memcpy(pgop + 1, buf, size);
1373
1374 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1375 rv = iop_msg_post(sc, im, mb, 30000);
1376 if (rv != 0)
1377 printf("%s: FIELD_SET failed for tid %d group %d\n",
1378 sc->sc_dv.dv_xname, tid, group);
1379
1380 iop_msg_unmap(sc, im);
1381 iop_msg_free(sc, im);
1382 free(pgop, M_DEVBUF);
1383 return (rv);
1384 }
1385
1386 /*
   1387  * Delete all rows in a tabular parameter group.
1388 */
1389 int
1390 iop_table_clear(struct iop_softc *sc, int tid, int group)
1391 {
1392 struct iop_msg *im;
1393 struct i2o_util_params_op *mf;
1394 struct iop_pgop pgop;
1395 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1396 int rv;
1397
1398 im = iop_msg_alloc(sc, IM_WAIT);
1399
1400 mf = (struct i2o_util_params_op *)mb;
1401 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1402 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1403 mf->msgictx = IOP_ICTX;
1404 mf->msgtctx = im->im_tctx;
1405 mf->flags = 0;
1406
1407 pgop.olh.count = htole16(1);
1408 pgop.olh.reserved = htole16(0);
1409 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1410 pgop.oat.fieldcount = htole16(0);
1411 pgop.oat.group = htole16(group);
1412 pgop.oat.fields[0] = htole16(0);
1413
1414 PHOLD(curlwp);
1415 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1416 rv = iop_msg_post(sc, im, mb, 30000);
1417 if (rv != 0)
1418 printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1419 sc->sc_dv.dv_xname, tid, group);
1420
1421 iop_msg_unmap(sc, im);
1422 PRELE(curlwp);
1423 iop_msg_free(sc, im);
1424 return (rv);
1425 }
1426
1427 /*
1428 * Add a single row to a tabular parameter group. The row can have only one
1429 * field.
1430 */
1431 int
1432 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1433 int size, int row)
1434 {
1435 struct iop_msg *im;
1436 struct i2o_util_params_op *mf;
1437 struct iop_pgop *pgop;
1438 int rv, totsize;
1439 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1440
1441 totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1442
1443 im = iop_msg_alloc(sc, IM_WAIT);
1444 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1445 iop_msg_free(sc, im);
1446 return (ENOMEM);
1447 }
1448
1449 mf = (struct i2o_util_params_op *)mb;
1450 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1451 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1452 mf->msgictx = IOP_ICTX;
1453 mf->msgtctx = im->im_tctx;
1454 mf->flags = 0;
1455
1456 pgop->olh.count = htole16(1);
1457 pgop->olh.reserved = htole16(0);
1458 pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1459 pgop->oat.fieldcount = htole16(1);
1460 pgop->oat.group = htole16(group);
1461 pgop->oat.fields[0] = htole16(0); /* FieldIdx */
1462 pgop->oat.fields[1] = htole16(1); /* RowCount */
1463 pgop->oat.fields[2] = htole16(row); /* KeyValue */
1464 memcpy(&pgop->oat.fields[3], buf, size);
1465
1466 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1467 rv = iop_msg_post(sc, im, mb, 30000);
1468 if (rv != 0)
1469 printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
1470 sc->sc_dv.dv_xname, tid, group, row);
1471
1472 iop_msg_unmap(sc, im);
1473 iop_msg_free(sc, im);
1474 free(pgop, M_DEVBUF);
1475 return (rv);
1476 }
1477
1478 /*
1479 * Execute a simple command (no parameters).
1480 */
1481 int
1482 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1483 int async, int timo)
1484 {
1485 struct iop_msg *im;
1486 struct i2o_msg mf;
1487 int rv, fl;
1488
1489 fl = (async != 0 ? IM_WAIT : IM_POLL);
1490 im = iop_msg_alloc(sc, fl);
1491
1492 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1493 mf.msgfunc = I2O_MSGFUNC(tid, function);
1494 mf.msgictx = ictx;
1495 mf.msgtctx = im->im_tctx;
1496
1497 rv = iop_msg_post(sc, im, &mf, timo);
1498 iop_msg_free(sc, im);
1499 return (rv);
1500 }
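/*
 * For example, iop_shutdown() above quiesces the executive with:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 *
 * i.e. polled completion (async == 0) with a 5000ms timeout.
 */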
1501
1502 /*
1503 * Post the system table to the IOP.
1504 */
1505 static int
1506 iop_systab_set(struct iop_softc *sc)
1507 {
1508 struct i2o_exec_sys_tab_set *mf;
1509 struct iop_msg *im;
1510 bus_space_handle_t bsh;
1511 bus_addr_t boo;
1512 u_int32_t mema[2], ioa[2];
1513 int rv;
1514 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1515
1516 im = iop_msg_alloc(sc, IM_WAIT);
1517
1518 mf = (struct i2o_exec_sys_tab_set *)mb;
1519 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1520 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1521 mf->msgictx = IOP_ICTX;
1522 mf->msgtctx = im->im_tctx;
1523 mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
1524 mf->segnumber = 0;
1525
1526 mema[1] = sc->sc_status.desiredprivmemsize;
1527 ioa[1] = sc->sc_status.desiredpriviosize;
1528
1529 if (mema[1] != 0) {
1530 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1531 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1532 mema[0] = htole32(boo);
1533 if (rv != 0) {
1534 printf("%s: can't alloc priv mem space, err = %d\n",
1535 sc->sc_dv.dv_xname, rv);
1536 mema[0] = 0;
1537 mema[1] = 0;
1538 }
1539 }
1540
1541 if (ioa[1] != 0) {
1542 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1543 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1544 ioa[0] = htole32(boo);
1545 if (rv != 0) {
1546 printf("%s: can't alloc priv i/o space, err = %d\n",
1547 sc->sc_dv.dv_xname, rv);
1548 ioa[0] = 0;
1549 ioa[1] = 0;
1550 }
1551 }
1552
1553 PHOLD(curlwp);
1554 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1555 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1556 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1557 rv = iop_msg_post(sc, im, mb, 5000);
1558 iop_msg_unmap(sc, im);
1559 iop_msg_free(sc, im);
1560 PRELE(curlwp);
1561 return (rv);
1562 }
1563
1564 /*
1565 * Reset the IOP. Must be called with interrupts disabled.
1566 */
1567 static int
1568 iop_reset(struct iop_softc *sc)
1569 {
1570 u_int32_t mfa, *sw;
1571 struct i2o_exec_iop_reset mf;
1572 int rv;
1573 paddr_t pa;
1574
1575 sw = (u_int32_t *)sc->sc_scr;
1576 pa = sc->sc_scr_seg->ds_addr;
1577
1578 mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1579 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1580 mf.reserved[0] = 0;
1581 mf.reserved[1] = 0;
1582 mf.reserved[2] = 0;
1583 mf.reserved[3] = 0;
1584 mf.statuslow = (u_int32_t)pa;
1585 mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1586
1587 *sw = htole32(0);
1588 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1589 BUS_DMASYNC_PREREAD);
1590
1591 if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1592 return (rv);
1593
1594 POLL(2500,
1595 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1596 BUS_DMASYNC_POSTREAD), *sw != 0));
1597 if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1598 printf("%s: reset rejected, status 0x%x\n",
1599 sc->sc_dv.dv_xname, le32toh(*sw));
1600 return (EIO);
1601 }
1602
1603 /*
1604 * IOP is now in the INIT state. Wait no more than 10 seconds for
1605 * the inbound queue to become responsive.
1606 */
1607 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1608 if (mfa == IOP_MFA_EMPTY) {
1609 printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1610 return (EIO);
1611 }
1612
1613 iop_release_mfa(sc, mfa);
1614 return (0);
1615 }
1616
1617 /*
1618 * Register a new initiator. Must be called with the configuration lock
1619 * held.
1620 */
1621 void
1622 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1623 {
1624 static int ictxgen;
1625 int s;
1626
1627 /* 0 is reserved (by us) for system messages. */
1628 ii->ii_ictx = ++ictxgen;
1629
1630 /*
1631 * `Utility initiators' don't make it onto the per-IOP initiator list
1632 * (which is used only for configuration), but do get one slot on
1633 * the inbound queue.
1634 */
1635 if ((ii->ii_flags & II_UTILITY) == 0) {
1636 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1637 sc->sc_nii++;
1638 } else
1639 sc->sc_nuii++;
1640
1641 s = splbio();
1642 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1643 splx(s);
1644 }
1645
1646 /*
1647 * Unregister an initiator. Must be called with the configuration lock
1648 * held.
1649 */
1650 void
1651 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1652 {
1653 int s;
1654
1655 if ((ii->ii_flags & II_UTILITY) == 0) {
1656 LIST_REMOVE(ii, ii_list);
1657 sc->sc_nii--;
1658 } else
1659 sc->sc_nuii--;
1660
1661 s = splbio();
1662 LIST_REMOVE(ii, ii_hash);
1663 splx(s);
1664 }
1665
1666 /*
1667 * Handle a reply frame from the IOP.
1668 */
1669 static int
1670 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1671 {
1672 struct iop_msg *im;
1673 struct i2o_reply *rb;
1674 struct i2o_fault_notify *fn;
1675 struct iop_initiator *ii;
1676 u_int off, ictx, tctx, status, size;
1677
1678 off = (int)(rmfa - sc->sc_rep_phys);
1679 rb = (struct i2o_reply *)(sc->sc_rep + off);
1680
1681 /* Perform reply queue DMA synchronisation. */
1682 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1683 sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1684 if (--sc->sc_curib != 0)
1685 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1686 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1687
1688 #ifdef I2ODEBUG
1689 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1690 panic("iop_handle_reply: 64-bit reply");
1691 #endif
1692 /*
1693 * Find the initiator.
1694 */
1695 ictx = le32toh(rb->msgictx);
1696 if (ictx == IOP_ICTX)
1697 ii = NULL;
1698 else {
1699 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1700 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1701 if (ii->ii_ictx == ictx)
1702 break;
1703 if (ii == NULL) {
1704 #ifdef I2ODEBUG
1705 iop_reply_print(sc, rb);
1706 #endif
1707 printf("%s: WARNING: bad ictx returned (%x)\n",
1708 sc->sc_dv.dv_xname, ictx);
1709 return (-1);
1710 }
1711 }
1712
1713 /*
1714 * If we received a transport failure notice, we've got to dig the
1715 * transaction context (if any) out of the original message frame,
1716 * and then release the original MFA back to the inbound FIFO.
1717 */
1718 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1719 status = I2O_STATUS_SUCCESS;
1720
1721 fn = (struct i2o_fault_notify *)rb;
1722 tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1723 iop_release_mfa(sc, fn->lowmfa);
1724 iop_tfn_print(sc, fn);
1725 } else {
1726 status = rb->reqstatus;
1727 tctx = le32toh(rb->msgtctx);
1728 }
1729
1730 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1731 /*
1732 * This initiator tracks state using message wrappers.
1733 *
1734 * Find the originating message wrapper, and if requested
1735 * notify the initiator.
1736 */
1737 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
   1738 		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
1739 (im->im_flags & IM_ALLOCED) == 0 ||
1740 tctx != im->im_tctx) {
1741 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1742 sc->sc_dv.dv_xname, tctx, im);
1743 if (im != NULL)
1744 printf("%s: flags=0x%08x tctx=0x%08x\n",
1745 sc->sc_dv.dv_xname, im->im_flags,
1746 im->im_tctx);
1747 #ifdef I2ODEBUG
1748 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1749 iop_reply_print(sc, rb);
1750 #endif
1751 return (-1);
1752 }
1753
1754 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1755 im->im_flags |= IM_FAIL;
1756
1757 #ifdef I2ODEBUG
1758 if ((im->im_flags & IM_REPLIED) != 0)
1759 panic("%s: dup reply", sc->sc_dv.dv_xname);
1760 #endif
1761 im->im_flags |= IM_REPLIED;
1762
1763 #ifdef I2ODEBUG
1764 if (status != I2O_STATUS_SUCCESS)
1765 iop_reply_print(sc, rb);
1766 #endif
1767 im->im_reqstatus = status;
1768 im->im_detstatus = le16toh(rb->detail);
1769
1770 /* Copy the reply frame, if requested. */
1771 if (im->im_rb != NULL) {
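			/*
			 * Bits 16-31 of msgflags hold the reply size in
			 * 32-bit words; ">> 14" converts that straight
			 * to bytes and "& ~3" keeps it word-sized.
			 */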
1772 size = (le32toh(rb->msgflags) >> 14) & ~3;
1773 #ifdef I2ODEBUG
1774 if (size > sc->sc_framesize)
1775 panic("iop_handle_reply: reply too large");
1776 #endif
1777 memcpy(im->im_rb, rb, size);
1778 }
1779
1780 /* Notify the initiator. */
1781 if ((im->im_flags & IM_WAIT) != 0)
1782 wakeup(im);
1783 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1784 if (ii)
1785 (*ii->ii_intr)(ii->ii_dv, im, rb);
1786 }
1787 } else {
1788 /*
1789 * This initiator discards message wrappers.
1790 *
1791 * Simply pass the reply frame to the initiator.
1792 */
1793 if (ii)
1794 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1795 }
1796
1797 return (status);
1798 }
1799
1800 /*
1801 * Handle an interrupt from the IOP.
1802 */
1803 int
1804 iop_intr(void *arg)
1805 {
1806 struct iop_softc *sc;
1807 u_int32_t rmfa;
1808
1809 sc = arg;
1810
1811 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1812 return (0);
1813
1814 for (;;) {
1815 /* Double read to account for IOP bug. */
1816 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1817 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1818 if (rmfa == IOP_MFA_EMPTY)
1819 break;
1820 }
1821 iop_handle_reply(sc, rmfa);
1822 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1823 }
1824
1825 return (1);
1826 }
1827
1828 /*
1829 * Handle an event signalled by the executive.
1830 */
1831 static void
1832 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1833 {
1834 struct i2o_util_event_register_reply *rb;
1835 u_int event;
1836
1837 rb = reply;
1838
1839 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1840 return;
1841
1842 event = le32toh(rb->event);
1843 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1844 }
1845
1846 /*
1847 * Allocate a message wrapper.
1848 */
1849 struct iop_msg *
1850 iop_msg_alloc(struct iop_softc *sc, int flags)
1851 {
1852 struct iop_msg *im;
1853 static u_int tctxgen;
1854 int s, i;
1855
1856 #ifdef I2ODEBUG
1857 if ((flags & IM_SYSMASK) != 0)
1858 panic("iop_msg_alloc: system flags specified");
1859 #endif
1860
1861 s = splbio();
1862 im = SLIST_FIRST(&sc->sc_im_freelist);
1863 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1864 if (im == NULL)
1865 panic("iop_msg_alloc: no free wrappers");
1866 #endif
1867 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1868 splx(s);
1869
1870 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1871 tctxgen += (1 << IOP_TCTX_SHIFT);
1872 im->im_flags = flags | IM_ALLOCED;
1873 im->im_rb = NULL;
1874 i = 0;
1875 do {
1876 im->im_xfer[i++].ix_size = 0;
1877 } while (i < IOP_MAX_MSG_XFERS);
1878
1879 return (im);
1880 }
1881
1882 /*
1883 * Free a message wrapper.
1884 */
1885 void
1886 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1887 {
1888 int s;
1889
1890 #ifdef I2ODEBUG
1891 if ((im->im_flags & IM_ALLOCED) == 0)
1892 panic("iop_msg_free: wrapper not allocated");
1893 #endif
1894
1895 im->im_flags = 0;
1896 s = splbio();
1897 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1898 splx(s);
1899 }
1900
1901 /*
1902 * Map a data transfer. Write a scatter-gather list into the message frame.
1903 */
1904 int
1905 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1906 void *xferaddr, int xfersize, int out, struct proc *up)
1907 {
1908 bus_dmamap_t dm;
1909 bus_dma_segment_t *ds;
1910 struct iop_xfer *ix;
1911 u_int rv, i, nsegs, flg, off, xn;
1912 u_int32_t *p;
1913
1914 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1915 if (ix->ix_size == 0)
1916 break;
1917
1918 #ifdef I2ODEBUG
1919 if (xfersize == 0)
1920 panic("iop_msg_map: null transfer");
1921 if (xfersize > IOP_MAX_XFER)
1922 panic("iop_msg_map: transfer too large");
1923 if (xn == IOP_MAX_MSG_XFERS)
1924 panic("iop_msg_map: too many xfers");
1925 #endif
1926
1927 /*
1928 * Only the first DMA map is static.
1929 */
1930 if (xn != 0) {
1931 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1932 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1933 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1934 if (rv != 0)
1935 return (rv);
1936 }
1937
1938 dm = ix->ix_map;
1939 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1940 (up == NULL ? BUS_DMA_NOWAIT : 0));
1941 if (rv != 0)
1942 goto bad;
1943
1944 /*
1945 * How many SIMPLE SG elements can we fit in this message?
1946 */
1947 off = mb[0] >> 16;
1948 p = mb + off;
1949 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
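	/*
	 * off is the current message length in 32-bit words (the size
	 * field in mb[0]), i.e. where the SGL will begin; each SIMPLE
	 * element occupies two words, hence the halving.
	 */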
1950
1951 if (dm->dm_nsegs > nsegs) {
1952 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1953 rv = EFBIG;
1954 DPRINTF(("iop_msg_map: too many segs\n"));
1955 goto bad;
1956 }
1957
1958 nsegs = dm->dm_nsegs;
1959 xfersize = 0;
1960
1961 /*
1962 * Write out the SG list.
1963 */
1964 if (out)
1965 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1966 else
1967 flg = I2O_SGL_SIMPLE;
1968
1969 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1970 p[0] = (u_int32_t)ds->ds_len | flg;
1971 p[1] = (u_int32_t)ds->ds_addr;
1972 xfersize += ds->ds_len;
1973 }
1974
1975 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1976 p[1] = (u_int32_t)ds->ds_addr;
1977 xfersize += ds->ds_len;
1978
1979 /* Fix up the transfer record, and sync the map. */
1980 ix->ix_flags = (out ? IX_OUT : IX_IN);
1981 ix->ix_size = xfersize;
1982 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1983 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
1984
1985 /*
1986 * If this is the first xfer we've mapped for this message, adjust
1987 * the SGL offset field in the message header.
1988 */
1989 if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1990 mb[0] += (mb[0] >> 12) & 0xf0;
1991 im->im_flags |= IM_SGLOFFADJ;
1992 }
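	/* Each of the nsegs SIMPLE elements added two words; grow the
	 * message size field accordingly. */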
1993 mb[0] += (nsegs << 17);
1994 return (0);
1995
1996 bad:
1997 if (xn != 0)
1998 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1999 return (rv);
2000 }
2001
2002 /*
2003 * Map a block I/O data transfer (different in that there's only one per
2004 * message maximum, and PAGE addressing may be used). Write a scatter
2005 * gather list into the message frame.
2006 */
2007 int
2008 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2009 void *xferaddr, int xfersize, int out)
2010 {
2011 bus_dma_segment_t *ds;
2012 bus_dmamap_t dm;
2013 struct iop_xfer *ix;
2014 u_int rv, i, nsegs, off, slen, tlen, flg;
2015 paddr_t saddr, eaddr;
2016 u_int32_t *p;
2017
2018 #ifdef I2ODEBUG
2019 if (xfersize == 0)
2020 panic("iop_msg_map_bio: null transfer");
2021 if (xfersize > IOP_MAX_XFER)
2022 panic("iop_msg_map_bio: transfer too large");
2023 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2024 panic("iop_msg_map_bio: SGLOFFADJ");
2025 #endif
2026
2027 ix = im->im_xfer;
2028 dm = ix->ix_map;
2029 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2030 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2031 if (rv != 0)
2032 return (rv);
2033
2034 off = mb[0] >> 16;
2035 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2036
2037 /*
2038 * If the transfer is highly fragmented and won't fit using SIMPLE
2039 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2040 * potentially more efficient, both for us and the IOP.
2041 */
2042 if (dm->dm_nsegs > nsegs) {
2043 nsegs = 1;
2044 p = mb + off + 1;
2045
2046 /* XXX This should be done with a bus_space flag. */
2047 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2048 slen = ds->ds_len;
2049 saddr = ds->ds_addr;
2050
2051 while (slen > 0) {
2052 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2053 tlen = min(eaddr - saddr, slen);
2054 slen -= tlen;
2055 *p++ = le32toh(saddr);
2056 saddr = eaddr;
2057 nsegs++;
2058 }
2059 }
2060
2061 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2062 I2O_SGL_END;
2063 if (out)
2064 mb[off] |= I2O_SGL_DATA_OUT;
2065 } else {
2066 p = mb + off;
2067 nsegs = dm->dm_nsegs;
2068
2069 if (out)
2070 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2071 else
2072 flg = I2O_SGL_SIMPLE;
2073
2074 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2075 p[0] = (u_int32_t)ds->ds_len | flg;
2076 p[1] = (u_int32_t)ds->ds_addr;
2077 }
2078
2079 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2080 I2O_SGL_END;
2081 p[1] = (u_int32_t)ds->ds_addr;
2082 nsegs <<= 1;
2083 }
2084
2085 /* Fix up the transfer record, and sync the map. */
2086 ix->ix_flags = (out ? IX_OUT : IX_IN);
2087 ix->ix_size = xfersize;
2088 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2089 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2090
2091 /*
2092 * Adjust the SGL offset and total message size fields. We don't
2093 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2094 */
2095 mb[0] += ((off << 4) + (nsegs << 16));
2096 return (0);
2097 }
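/*
 * Illustrative sketch: a PAGE_LIST element is one header word (the
 * total byte count plus flags, written to mb[off] above) followed by
 * one address word per page touched.  This hypothetical helper counts
 * the address words one DMA segment contributes, mirroring the
 * page-splitting loop above.
 */
static inline u_int
i2o_page_list_words(paddr_t saddr, u_int slen)
{
	paddr_t eaddr;
	u_int n;

	for (n = 0; slen > 0; n++) {
		/* Round saddr up to the next page boundary. */
		eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		slen -= min(eaddr - saddr, slen);
		saddr = eaddr;
	}
	return (n);
}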
2098
2099 /*
2100 * Unmap all data transfers associated with a message wrapper.
2101 */
2102 void
2103 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2104 {
2105 struct iop_xfer *ix;
2106 int i;
2107
2108 #ifdef I2ODEBUG
2109 if (im->im_xfer[0].ix_size == 0)
2110 panic("iop_msg_unmap: no transfers mapped");
2111 #endif
2112
2113 for (ix = im->im_xfer, i = 0;;) {
2114 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2115 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2116 BUS_DMASYNC_POSTREAD);
2117 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2118
2119 /* Only the first DMA map is static. */
2120 if (i != 0)
2121 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2122 if ((++ix)->ix_size == 0)
2123 break;
2124 if (++i >= IOP_MAX_MSG_XFERS)
2125 break;
2126 }
2127 }
2128
2129 /*
2130 * Post a message frame to the IOP's inbound queue.
2131 */
2132 int
2133 iop_post(struct iop_softc *sc, u_int32_t *mb)
2134 {
2135 u_int32_t mfa;
2136 int s;
2137
2138 #ifdef I2ODEBUG
2139 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2140 panic("iop_post: frame too large");
2141 #endif
2142
2143 s = splbio();
2144
2145 /* Allocate a slot with the IOP. */
2146 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2147 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2148 splx(s);
2149 printf("%s: mfa not forthcoming\n",
2150 sc->sc_dv.dv_xname);
2151 return (EAGAIN);
2152 }
2153
2154 /* Perform reply buffer DMA synchronisation. */
2155 if (sc->sc_curib++ == 0)
2156 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2157 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2158
2159 /* Copy out the message frame. */
2160 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2161 mb[0] >> 16);
2162 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2163 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2164
2165 /* Post the MFA back to the IOP. */
2166 iop_outl(sc, IOP_REG_IFIFO, mfa);
2167
2168 splx(s);
2169 return (0);
2170 }
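/*
 * Note on the barrier length above: mb[0] carries the frame size as
 * a count of 32-bit words in its top half, so the frame's byte count
 * is (mb[0] >> 16) << 2.  The expression (mb[0] >> 14) & ~3 folds
 * the two shifts together and masks off the bits dragged down from
 * below the size field.  A minimal sketch, assuming only that layout:
 */
static inline u_int
iop_frame_bytes(u_int32_t mb0)
{

	/* Equivalent to ((mb0 >> 16) << 2) for any word count. */
	return ((mb0 >> 14) & ~3);
}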
2171
2172 /*
2173 * Post a message to the IOP and deal with completion.
2174 */
2175 int
2176 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2177 {
2178 u_int32_t *mb;
2179 int rv, s;
2180
2181 mb = xmb;
2182
2183 /* Terminate the scatter/gather list chain. */
2184 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2185 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2186
2187 if ((rv = iop_post(sc, mb)) != 0)
2188 return (rv);
2189
2190 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2191 if ((im->im_flags & IM_POLL) != 0)
2192 iop_msg_poll(sc, im, timo);
2193 else
2194 iop_msg_wait(sc, im, timo);
2195
2196 s = splbio();
2197 if ((im->im_flags & IM_REPLIED) != 0) {
2198 if ((im->im_flags & IM_NOSTATUS) != 0)
2199 rv = 0;
2200 else if ((im->im_flags & IM_FAIL) != 0)
2201 rv = ENXIO;
2202 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2203 rv = EIO;
2204 else
2205 rv = 0;
2206 } else
2207 rv = EBUSY;
2208 splx(s);
2209 } else
2210 rv = 0;
2211
2212 return (rv);
2213 }
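/*
 * Note: IM_POLL spins at splbio() and drains the outbound FIFO by
 * hand in iop_msg_poll(), so a message can complete even while
 * interrupts are unavailable (e.g. during autoconfiguration);
 * IM_WAIT tsleep()s on the wrapper in iop_msg_wait() and relies on
 * the interrupt path to deliver the reply.
 */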
2214
2215 /*
2216 * Spin until the specified message is replied to.
2217 */
2218 static void
2219 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2220 {
2221 u_int32_t rmfa;
2222 int s;
2223
2224 s = splbio();
2225
2226 /* Wait for completion. */
2227 for (timo *= 10; timo != 0; timo--) {
2228 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2229 /* Double read to account for IOP bug. */
2230 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2231 if (rmfa == IOP_MFA_EMPTY)
2232 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2233 if (rmfa != IOP_MFA_EMPTY) {
2234 iop_handle_reply(sc, rmfa);
2235
2236 /*
2237 * Return the reply frame to the IOP's
2238 * outbound FIFO.
2239 */
2240 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2241 }
2242 }
2243 if ((im->im_flags & IM_REPLIED) != 0)
2244 break;
2245 DELAY(100);
2246 }
2247
2248 if (timo == 0) {
2249 #ifdef I2ODEBUG
2250 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2251 if (iop_status_get(sc, 1) != 0)
2252 printf("iop_msg_poll: unable to retrieve status\n");
2253 else
2254 printf("iop_msg_poll: IOP state = %d\n",
2255 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2256 #endif
2257 }
2258
2259 splx(s);
2260 }
2261
2262 /*
2263 * Sleep until the specified message is replied to.
2264 */
2265 static void
2266 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2267 {
2268 int s, rv;
2269
2270 s = splbio();
2271 if ((im->im_flags & IM_REPLIED) != 0) {
2272 splx(s);
2273 return;
2274 }
2275 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2276 splx(s);
2277
2278 #ifdef I2ODEBUG
2279 if (rv != 0) {
2280 printf("iop_msg_wait: tsleep() == %d\n", rv);
2281 if (iop_status_get(sc, 0) != 0)
2282 printf("iop_msg_wait: unable to retrieve status\n");
2283 else
2284 printf("iop_msg_wait: IOP state = %d\n",
2285 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2286 }
2287 #endif
2288 }
2289
2290 /*
2291 * Release an unused message frame back to the IOP's inbound fifo.
2292 */
2293 static void
2294 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2295 {
2296
2297 /* Use the frame to issue a no-op. */
2298 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2299 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2300 iop_outl_msg(sc, mfa + 8, 0);
2301 iop_outl_msg(sc, mfa + 12, 0);
2302
2303 iop_outl(sc, IOP_REG_IFIFO, mfa);
2304 }
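/*
 * Note: the four words above form a minimal UtilNOP frame -- word 0
 * holds the I2O version with the frame size (four words) in its top
 * half, word 1 selects the IOP's TID and the NOP function, and the
 * two context words are ignored by the IOP.
 */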
2305
2306 #ifdef I2ODEBUG
2307 /*
2308 * Dump a reply frame header.
2309 */
2310 static void
2311 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2312 {
2313 u_int function, detail;
2314 #ifdef I2OVERBOSE
2315 const char *statusstr;
2316 #endif
2317
2318 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2319 detail = le16toh(rb->detail);
2320
2321 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2322
2323 #ifdef I2OVERBOSE
2324 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2325 statusstr = iop_status[rb->reqstatus];
2326 else
2327 statusstr = "undefined error code";
2328
2329 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2330 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2331 #else
2332 printf("%s: function=0x%02x status=0x%02x\n",
2333 sc->sc_dv.dv_xname, function, rb->reqstatus);
2334 #endif
2335 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2336 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2337 le32toh(rb->msgtctx));
2338 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2339 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2340 (le32toh(rb->msgflags) >> 8) & 0xff);
2341 }
2342 #endif
2343
2344 /*
2345 * Dump a transport failure reply.
2346 */
2347 static void
2348 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2349 {
2350
2351 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2352
2353 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2354 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2355 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2356 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2357 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2358 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2359 }
2360
2361 /*
2362 * Translate an I2O ASCII field into a C string.
2363 */
2364 void
2365 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2366 {
2367 int hc, lc, i, nit;
2368
2369 dlen--;
2370 lc = 0;
2371 hc = 0;
2372 i = 0;
2373
2374 /*
2375 * DPT uses NUL as a space, whereas AMI uses it as a terminator. The
2376 * spec has nothing to say about it. Since AMI fields are usually
2377 * filled with junk after the terminator, stop at NUL unless this is DPT.
2378 */
2379 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2380
2381 while (slen-- != 0 && dlen-- != 0) {
2382 if (nit && *src == '\0')
2383 break;
2384 else if (*src <= 0x20 || *src >= 0x7f) {
2385 if (hc)
2386 dst[i++] = ' ';
2387 } else {
2388 hc = 1;
2389 dst[i++] = *src;
2390 lc = i;
2391 }
2392 src++;
2393 }
2394
2395 dst[lc] = '\0';
2396 }
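/*
 * Usage sketch, mirroring the calls in iop_print_ident() below: the
 * source is a fixed-width I2O ASCII field rather than a C string;
 * the result is trimmed, NUL-terminated, and has non-printable
 * characters squashed to single spaces:
 *
 *	char buf[32];
 *
 *	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo),
 *	    buf, sizeof(buf));
 */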
2397
2398 /*
2399 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2400 */
2401 int
2402 iop_print_ident(struct iop_softc *sc, int tid)
2403 {
2404 struct {
2405 struct i2o_param_op_results pr;
2406 struct i2o_param_read_results prr;
2407 struct i2o_param_device_identity di;
2408 } __attribute__ ((__packed__)) p;
2409 char buf[32];
2410 int rv;
2411
2412 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2413 sizeof(p), NULL);
2414 if (rv != 0)
2415 return (rv);
2416
2417 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2418 sizeof(buf));
2419 printf(" <%s, ", buf);
2420 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2421 sizeof(buf));
2422 printf("%s, ", buf);
2423 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2424 printf("%s>", buf);
2425
2426 return (0);
2427 }
2428
2429 /*
2430 * Claim or unclaim the specified TID.
2431 */
2432 int
2433 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2434 int flags)
2435 {
2436 struct iop_msg *im;
2437 struct i2o_util_claim mf;
2438 int rv, func;
2439
2440 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2441 im = iop_msg_alloc(sc, IM_WAIT);
2442
2443 /* We can use the same structure, as they're identical. */
2444 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2445 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2446 mf.msgictx = ii->ii_ictx;
2447 mf.msgtctx = im->im_tctx;
2448 mf.flags = flags;
2449
2450 rv = iop_msg_post(sc, im, &mf, 5000);
2451 iop_msg_free(sc, im);
2452 return (rv);
2453 }
2454
2455 /*
2456 * Perform an abort.
2457 */
2458 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2459 int tctxabort, int flags)
2460 {
2461 struct iop_msg *im;
2462 struct i2o_util_abort mf;
2463 int rv;
2464
2465 im = iop_msg_alloc(sc, IM_WAIT);
2466
2467 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2468 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2469 mf.msgictx = ii->ii_ictx;
2470 mf.msgtctx = im->im_tctx;
2471 mf.flags = (func << 24) | flags;
2472 mf.tctxabort = tctxabort;
2473
2474 rv = iop_msg_post(sc, im, &mf, 5000);
2475 iop_msg_free(sc, im);
2476 return (rv);
2477 }
2478
2479 /*
2480 * Enable or disable reception of events for the specified device.
2481 */
2482 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2483 {
2484 struct i2o_util_event_register mf;
2485
2486 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2487 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2488 mf.msgictx = ii->ii_ictx;
2489 mf.msgtctx = 0;
2490 mf.eventmask = mask;
2491
2492 /* This message is replied to only when events are signalled. */
2493 return (iop_post(sc, (u_int32_t *)&mf));
2494 }
2495
2496 int
2497 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2498 {
2499 struct iop_softc *sc;
2500
2501 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2502 return (ENXIO);
2503 if ((sc->sc_flags & IOP_ONLINE) == 0)
2504 return (ENXIO);
2505 if ((sc->sc_flags & IOP_OPEN) != 0)
2506 return (EBUSY);
2507 sc->sc_flags |= IOP_OPEN;
2508
2509 return (0);
2510 }
2511
2512 int
2513 iopclose(dev_t dev, int flag, int mode,
2514 struct lwp *l)
2515 {
2516 struct iop_softc *sc;
2517
2518 sc = device_lookup(&iop_cd, minor(dev));
2519 sc->sc_flags &= ~IOP_OPEN;
2520
2521 return (0);
2522 }
2523
2524 int
2525 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2526 {
2527 struct iop_softc *sc;
2528 struct iovec *iov;
2529 int rv, i;
2530
2531 sc = device_lookup(&iop_cd, minor(dev));
2532
2533 switch (cmd) {
2534 case IOPIOCPT:
2535 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2536 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2537 if (rv)
2538 return (rv);
2539
2540 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2541
2542 case IOPIOCGSTATUS:
2543 iov = (struct iovec *)data;
2544 i = sizeof(struct i2o_status);
2545 if (i > iov->iov_len)
2546 i = iov->iov_len;
2547 else
2548 iov->iov_len = i;
2549 if ((rv = iop_status_get(sc, 0)) == 0)
2550 rv = copyout(&sc->sc_status, iov->iov_base, i);
2551 return (rv);
2552
2553 case IOPIOCGLCT:
2554 case IOPIOCGTIDMAP:
2555 case IOPIOCRECONFIG:
2556 break;
2557
2558 default:
2559 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2560 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2561 #endif
2562 return (ENOTTY);
2563 }
2564
2565 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2566 return (rv);
2567
2568 switch (cmd) {
2569 case IOPIOCGLCT:
2570 iov = (struct iovec *)data;
2571 i = le16toh(sc->sc_lct->tablesize) << 2;
2572 if (i > iov->iov_len)
2573 i = iov->iov_len;
2574 else
2575 iov->iov_len = i;
2576 rv = copyout(sc->sc_lct, iov->iov_base, i);
2577 break;
2578
2579 case IOPIOCRECONFIG:
2580 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2581 rv = iop_reconfigure(sc, 0);
2582 break;
2583
2584 case IOPIOCGTIDMAP:
2585 iov = (struct iovec *)data;
2586 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2587 if (i > iov->iov_len)
2588 i = iov->iov_len;
2589 else
2590 iov->iov_len = i;
2591 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2592 break;
2593 }
2594
2595 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2596 return (rv);
2597 }
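/*
 * Userland usage sketch for IOPIOCGSTATUS (illustrative; the device
 * node name is an assumption).  The argument is a struct iovec
 * giving the destination buffer and its size; the driver clamps the
 * copy to the smaller of the two and updates iov_len:
 *
 *	#include <sys/uio.h>
 *	#include <sys/ioctl.h>
 *	#include <dev/i2o/i2o.h>
 *	#include <dev/i2o/iopio.h>
 *	#include <fcntl.h>
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		... inspect le16toh(st.orgid), etc. ...
 */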
2598
2599 static int
2600 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2601 {
2602 struct iop_msg *im;
2603 struct i2o_msg *mf;
2604 struct ioppt_buf *ptb;
2605 int rv, i, mapped;
2606
2607 mf = NULL;
2608 im = NULL;
2609 mapped = 0;	/* set once a buffer is mapped in the loop below */
2610
2611 if (pt->pt_msglen > sc->sc_framesize ||
2612 pt->pt_msglen < sizeof(struct i2o_msg) ||
2613 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2614 pt->pt_nbufs < 0 ||
2615 #if 0
2616 pt->pt_replylen < 0 ||
2617 #endif
2618 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2619 return (EINVAL);
2620
2621 for (i = 0; i < pt->pt_nbufs; i++)
2622 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2623 rv = ENOMEM;
2624 goto bad;
2625 }
2626
2627 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2628 if (mf == NULL)
2629 return (ENOMEM);
2630
2631 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2632 goto bad;
2633
2634 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2635 im->im_rb = (struct i2o_reply *)mf;
2636 mf->msgictx = IOP_ICTX;
2637 mf->msgtctx = im->im_tctx;
2638
2639 for (i = 0; i < pt->pt_nbufs; i++) {
2640 ptb = &pt->pt_bufs[i];
2641 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2642 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2643 if (rv != 0)
2644 goto bad;
2645 mapped = 1;
2646 }
2647
2648 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2649 goto bad;
2650
2651 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2652 if (i > sc->sc_framesize)
2653 i = sc->sc_framesize;
2654 if (i > pt->pt_replylen)
2655 i = pt->pt_replylen;
2656 rv = copyout(im->im_rb, pt->pt_reply, i);
2657
2658 bad:
2659 if (mapped != 0)
2660 iop_msg_unmap(sc, im);
2661 if (im != NULL)
2662 iop_msg_free(sc, im);
2663 if (mf != NULL)
2664 free(mf, M_DEVBUF);
2665 return (rv);
2666 }
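/*
 * Userland usage sketch for IOPIOCPT (illustrative only; the field
 * names are the ones consumed by iop_passthrough() above).  The
 * caller supplies a raw message frame, any data buffers, a reply
 * buffer, and a timeout in milliseconds (1000..300000 accepted).
 * The driver forces msgictx/msgtctx itself, so those fields of the
 * frame need not be meaningful:
 *
 *	struct ioppt pt;
 *	u_int32_t msg[16], reply[32];
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = sizeof(msg);
 *	pt.pt_nbufs = 0;
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 2000;
 *	rv = ioctl(fd, IOPIOCPT, &pt);
 *
 * The reply frame is copied back truncated to the smallest of its own
 * length, the IOP's frame size, and pt_replylen.
 */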