FreeBSD/Linux Kernel Cross Reference
sys/dev/aac/aac.c
1 /*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001 Adaptec, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/8.4/sys/dev/aac/aac.c 226913 2011-10-29 23:44:29Z marius $");
32
33 /*
34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
35 */
36 #define AAC_DRIVERNAME "aac"
37
38 #include "opt_aac.h"
39
40 /* #include <stddef.h> */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/sysctl.h>
47 #include <sys/poll.h>
48 #include <sys/ioccom.h>
49
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/signalvar.h>
53 #include <sys/time.h>
54 #include <sys/eventhandler.h>
55 #include <sys/rman.h>
56
57 #include <machine/bus.h>
58 #include <sys/bus_dma.h>
59 #include <machine/resource.h>
60
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63
64 #include <dev/aac/aacreg.h>
65 #include <sys/aac_ioctl.h>
66 #include <dev/aac/aacvar.h>
67 #include <dev/aac/aac_tables.h>
68
69 static void aac_startup(void *arg);
70 static void aac_add_container(struct aac_softc *sc,
71 struct aac_mntinforesp *mir, int f);
72 static void aac_get_bus_info(struct aac_softc *sc);
73 static void aac_daemon(void *arg);
74
75 /* Command Processing */
76 static void aac_timeout(struct aac_softc *sc);
77 static void aac_complete(void *context, int pending);
78 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
79 static void aac_bio_complete(struct aac_command *cm);
80 static int aac_wait_command(struct aac_command *cm);
81 static void aac_command_thread(struct aac_softc *sc);
82
83 /* Command Buffer Management */
84 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
85 int nseg, int error);
86 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
87 int nseg, int error);
88 static int aac_alloc_commands(struct aac_softc *sc);
89 static void aac_free_commands(struct aac_softc *sc);
90 static void aac_unmap_command(struct aac_command *cm);
91
92 /* Hardware Interface */
93 static int aac_alloc(struct aac_softc *sc);
94 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
95 int error);
96 static int aac_check_firmware(struct aac_softc *sc);
97 static int aac_init(struct aac_softc *sc);
98 static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
99 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
100 u_int32_t arg3, u_int32_t *sp);
101 static int aac_setup_intr(struct aac_softc *sc);
102 static int aac_enqueue_fib(struct aac_softc *sc, int queue,
103 struct aac_command *cm);
104 static int aac_dequeue_fib(struct aac_softc *sc, int queue,
105 u_int32_t *fib_size, struct aac_fib **fib_addr);
106 static int aac_enqueue_response(struct aac_softc *sc, int queue,
107 struct aac_fib *fib);
108
109 /* StrongARM interface */
110 static int aac_sa_get_fwstatus(struct aac_softc *sc);
111 static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
112 static int aac_sa_get_istatus(struct aac_softc *sc);
113 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
114 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
115 u_int32_t arg0, u_int32_t arg1,
116 u_int32_t arg2, u_int32_t arg3);
117 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
118 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);
119
120 struct aac_interface aac_sa_interface = {
121 aac_sa_get_fwstatus,
122 aac_sa_qnotify,
123 aac_sa_get_istatus,
124 aac_sa_clear_istatus,
125 aac_sa_set_mailbox,
126 aac_sa_get_mailbox,
127 aac_sa_set_interrupts,
128 NULL, NULL, NULL
129 };
130
131 /* i960Rx interface */
132 static int aac_rx_get_fwstatus(struct aac_softc *sc);
133 static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
134 static int aac_rx_get_istatus(struct aac_softc *sc);
135 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
136 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
137 u_int32_t arg0, u_int32_t arg1,
138 u_int32_t arg2, u_int32_t arg3);
139 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
140 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
141 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
142 static int aac_rx_get_outb_queue(struct aac_softc *sc);
143 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);
144
145 struct aac_interface aac_rx_interface = {
146 aac_rx_get_fwstatus,
147 aac_rx_qnotify,
148 aac_rx_get_istatus,
149 aac_rx_clear_istatus,
150 aac_rx_set_mailbox,
151 aac_rx_get_mailbox,
152 aac_rx_set_interrupts,
153 aac_rx_send_command,
154 aac_rx_get_outb_queue,
155 aac_rx_set_outb_queue
156 };
157
158 /* Rocket/MIPS interface */
159 static int aac_rkt_get_fwstatus(struct aac_softc *sc);
160 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
161 static int aac_rkt_get_istatus(struct aac_softc *sc);
162 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
163 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
164 u_int32_t arg0, u_int32_t arg1,
165 u_int32_t arg2, u_int32_t arg3);
166 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
167 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
168 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
169 static int aac_rkt_get_outb_queue(struct aac_softc *sc);
170 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);
171
172 struct aac_interface aac_rkt_interface = {
173 aac_rkt_get_fwstatus,
174 aac_rkt_qnotify,
175 aac_rkt_get_istatus,
176 aac_rkt_clear_istatus,
177 aac_rkt_set_mailbox,
178 aac_rkt_get_mailbox,
179 aac_rkt_set_interrupts,
180 aac_rkt_send_command,
181 aac_rkt_get_outb_queue,
182 aac_rkt_set_outb_queue
183 };
184
185 /* Debugging and Diagnostics */
186 static void aac_describe_controller(struct aac_softc *sc);
187 static char *aac_describe_code(struct aac_code_lookup *table,
188 u_int32_t code);
189
190 /* Management Interface */
191 static d_open_t aac_open;
192 static d_ioctl_t aac_ioctl;
193 static d_poll_t aac_poll;
194 static void aac_cdevpriv_dtor(void *arg);
195 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
196 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
197 static void aac_handle_aif(struct aac_softc *sc,
198 struct aac_fib *fib);
199 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
200 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
201 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
202 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
203 static int aac_return_aif(struct aac_softc *sc,
204 struct aac_fib_context *ctx, caddr_t uptr);
205 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
206 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
207 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
208 static void aac_ioctl_event(struct aac_softc *sc,
209 struct aac_event *event, void *arg);
210 static struct aac_mntinforesp *
211 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);
212
213 static struct cdevsw aac_cdevsw = {
214 .d_version = D_VERSION,
215 .d_flags = D_NEEDGIANT,
216 .d_open = aac_open,
217 .d_ioctl = aac_ioctl,
218 .d_poll = aac_poll,
219 .d_name = "aac",
220 };
221
222 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
223
224 /* sysctl node */
225 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
226
227 /*
228 * Device Interface
229 */
230
/*
 * Initialize the controller and softc.
 *
 * Called from the bus attach method once PCI resources have been mapped.
 * Ordering here matters: the firmware is validated before any DMA or
 * interrupt setup, and the controller stays marked AAC_STATE_SUSPEND
 * until aac_startup() has probed the containers.
 *
 * Returns 0 on success or an errno on failure.
 *
 * NOTE(review): the error paths taken after mtx_init()/aac_alloc() do not
 * undo earlier initialization here; presumably the bus attach caller is
 * responsible for cleanup via aac_free()/detach -- confirm before reusing
 * this path.
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Initialize per-controller command queues (free/ready/busy/bio). */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/* Task used to drain completed FIBs outside the interrupt filter. */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* Mark controller as suspended until we get ourselves organised. */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/* Check that the firmware on the card is supported. */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/* Initialize locks. */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout; it runs under aac_io_lock. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);

	/* Initialize the adapter: DMA resources first, then firmware init. */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/* Allocate and connect our interrupt. */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/* Print a little information about the controller. */
	aac_describe_controller(sc);

	/* Register to probe our containers later, once interrupts work. */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/* Make the control device, plus legacy "afa"/"hpn" aliases. */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread. */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
		   &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump. */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices. */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	/* Arm the clock daemon; first run in 60 seconds. */
	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}
338
339 static void
340 aac_daemon(void *arg)
341 {
342 struct timeval tv;
343 struct aac_softc *sc;
344 struct aac_fib *fib;
345
346 sc = arg;
347 mtx_assert(&sc->aac_io_lock, MA_OWNED);
348
349 if (callout_pending(&sc->aac_daemontime) ||
350 callout_active(&sc->aac_daemontime) == 0)
351 return;
352 getmicrotime(&tv);
353 aac_alloc_sync_fib(sc, &fib);
354 *(uint32_t *)fib->data = tv.tv_sec;
355 aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
356 aac_release_sync_fib(sc);
357 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
358 }
359
360 void
361 aac_add_event(struct aac_softc *sc, struct aac_event *event)
362 {
363
364 switch (event->ev_type & AAC_EVENT_MASK) {
365 case AAC_EVENT_CMFREE:
366 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
367 break;
368 default:
369 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
370 event->ev_type);
371 break;
372 }
373
374 return;
375 }
376
377 /*
378 * Request information of container #cid
379 */
380 static struct aac_mntinforesp *
381 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
382 {
383 struct aac_mntinfo *mi;
384
385 mi = (struct aac_mntinfo *)&fib->data[0];
386 /* use 64-bit LBA if enabled */
387 mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
388 VM_NameServe64 : VM_NameServe;
389 mi->MntType = FT_FILESYS;
390 mi->MntCount = cid;
391
392 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
393 sizeof(struct aac_mntinfo))) {
394 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
395 return (NULL);
396 }
397
398 return ((struct aac_mntinforesp *)&fib->data[0]);
399 }
400
401 /*
402 * Probe for containers, create disks.
403 */
404 static void
405 aac_startup(void *arg)
406 {
407 struct aac_softc *sc;
408 struct aac_fib *fib;
409 struct aac_mntinforesp *mir;
410 int count = 0, i = 0;
411
412 sc = (struct aac_softc *)arg;
413 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
414
415 /* disconnect ourselves from the intrhook chain */
416 config_intrhook_disestablish(&sc->aac_ich);
417
418 mtx_lock(&sc->aac_io_lock);
419 aac_alloc_sync_fib(sc, &fib);
420
421 /* loop over possible containers */
422 do {
423 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
424 continue;
425 if (i == 0)
426 count = mir->MntRespCount;
427 aac_add_container(sc, mir, 0);
428 i++;
429 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
430
431 aac_release_sync_fib(sc);
432 mtx_unlock(&sc->aac_io_lock);
433
434 /* poke the bus to actually attach the child devices */
435 if (bus_generic_attach(sc->aac_dev))
436 device_printf(sc->aac_dev, "bus_generic_attach failed\n");
437
438 /* mark the controller up */
439 sc->aac_state &= ~AAC_STATE_SUSPEND;
440
441 /* enable interrupts now */
442 AAC_UNMASK_INTERRUPTS(sc);
443 }
444
445 /*
446 * Create a device to represent a new container
447 */
448 static void
449 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
450 {
451 struct aac_container *co;
452 device_t child;
453
454 /*
455 * Check container volume type for validity. Note that many of
456 * the possible types may never show up.
457 */
458 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
459 co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
460 M_NOWAIT | M_ZERO);
461 if (co == NULL)
462 panic("Out of memory?!");
463 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
464 mir->MntTable[0].ObjectId,
465 mir->MntTable[0].FileSystemName,
466 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
467
468 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
469 device_printf(sc->aac_dev, "device_add_child failed\n");
470 else
471 device_set_ivars(child, co);
472 device_set_desc(child, aac_describe_code(aac_container_types,
473 mir->MntTable[0].VolType));
474 co->co_disk = child;
475 co->co_found = f;
476 bcopy(&mir->MntTable[0], &co->co_mntobj,
477 sizeof(struct aac_mntobj));
478 mtx_lock(&sc->aac_container_lock);
479 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
480 mtx_unlock(&sc->aac_container_lock);
481 }
482 }
483
/*
 * Allocate resources associated with (sc): the three DMA tags (data
 * buffers, FIBs, common area), the common-area memory itself, and the
 * initial pool of FIBs/command structures.
 *
 * Returns 0 on success or ENOMEM.  Resources allocated before a failure
 * are not released here; aac_free() handles that.
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable
	 * space.  64-bit addresses are only allowed when the adapter
	 * advertises 64-bit scatter/gather support.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       MAXBSIZE,		/* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       MAXBSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable
	 * space.  FIBs are allocated in contiguous batches of
	 * aac_max_fibs_alloc, in a single segment.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.  The
	 * extra 8192 bytes allow the buffer to be repositioned above the
	 * 8KB boundary (see workaround below).
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	/* aac_common_map() stores the bus address in aac_common_busaddr */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, 8192 + sizeof(*sc->aac_common),
			aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs. */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACBUF, M_WAITOK|M_ZERO);
	/* Keep allocating batches until we hit the cap or run dry. */
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	/* A partial allocation is usable; none at all is fatal. */
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}
592
/*
 * Free all of the resources associated with (sc).
 *
 * Should not be called if the controller is active.  Each resource is
 * tested before release, so this is safe on a partially initialized
 * softc (e.g. after a failed attach).
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	/* free(NULL, ...) is a no-op, so no guard is needed here */
	free(sc->aac_commands, M_AACBUF);

	/*
	 * Destroy the common area.
	 * NOTE(review): aac_alloc() may have advanced sc->aac_common by
	 * 8192 bytes past the address bus_dmamem_alloc() returned; the
	 * adjusted pointer is what gets handed to bus_dmamem_free() here
	 * -- confirm this is acceptable for the platform busdma code.
	 */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
				     sc->aac_irq);

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid0, sc->aac_regs_res0);
	/* NARK hardware maps a second register window */
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid1, sc->aac_regs_res1);
}
647
/*
 * Disconnect from the controller completely, in preparation for unload.
 *
 * Teardown order is significant: stop the clock daemon, then the AIF
 * thread, then delete child devices, then quiesce the hardware via
 * aac_shutdown(), and only then free resources and destroy the locks.
 * Returns 0 on success or the errno from a failed child deletion.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Stop the clock daemon and wait for any in-flight run to end. */
	callout_drain(&sc->aac_daemontime);

	/*
	 * Ask the AIF thread to exit and sleep until it acknowledges;
	 * the thread wakes us on sc->aac_dev before exiting (see
	 * aac_command_thread()).
	 */
	mtx_lock(&sc->aac_io_lock);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
	}
	mtx_unlock(&sc->aac_io_lock);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACBUF);
	}

	/* Tell the hardware we are going away. */
	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}
705
/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we
 * won't allow shutdown if any device is open.  Always returns 0; a failed
 * close command is only reported on the console.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	/* VM_CloseAll with the wildcard container id closes everything. */
	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			printf("FAILED.\n");
		} else {
			printf("done.\n");
		}
	}
#endif

	/* Mask interrupts last so the sync FIB above can complete. */
	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}
769
770 /*
771 * Bring the controller to a quiescent state, ready for system suspend.
772 */
773 int
774 aac_suspend(device_t dev)
775 {
776 struct aac_softc *sc;
777
778 sc = device_get_softc(dev);
779
780 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
781 sc->aac_state |= AAC_STATE_SUSPEND;
782
783 AAC_MASK_INTERRUPTS(sc);
784 return(0);
785 }
786
787 /*
788 * Bring the controller back to a state ready for operation.
789 */
790 int
791 aac_resume(device_t dev)
792 {
793 struct aac_softc *sc;
794
795 sc = device_get_softc(dev);
796
797 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
798 sc->aac_state &= ~AAC_STATE_SUSPEND;
799 AAC_UNMASK_INTERRUPTS(sc);
800 return(0);
801 }
802
/*
 * Interrupt handler for the NEW_COMM interface.
 *
 * Drains the adapter's outbound queue.  Each index read from the queue
 * encodes what it refers to in its low bits: bit 1 set means an AIF
 * (or 0xfffffffe, a "more work" hint); otherwise bits [31:2] select a
 * command slot and bit 0 marks a "fast" response with implied status.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);
	while (1) {
		/* 0xffffffff means the queue is empty; re-read once. */
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF: copy it out of adapter memory word by word. */
			fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
				       M_NOWAIT | M_ZERO);
			if (fib == NULL) {
				/* If we're really this short on memory,
				 * hopefully breaking out of the handler will
				 * allow something to get freed.  This
				 * actually sucks a whole lot.
				 */
				break;
			}
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			free(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			/* Command completion: index >> 2 is the slot. */
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				/* Fast response: status implied, no FIB copy. */
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}
885
/*
 * Interrupt filter for the !NEW_COMM interface.
 *
 * Runs in filter (primary interrupt) context: no sleeping, no regular
 * mutexes.  Completion work is punted to the fast taskqueue; AIF and
 * printf traffic is handed to the AIF thread via wakeup().
 */
int
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
			(sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * msleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
	return (FILTER_HANDLED);
}
932
/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller.
 *
 * Pulls commands first from the deferred-ready queue, then builds new
 * ones from the bio queue, and maps each through busdma.  Stops when
 * the queue freezes (controller out of resources) or nothing is left.
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.  aac_map_command_sg() is the busdma callback and
		 * actually submits the command to the adapter.
		 */
		if (cm->cm_datalen != 0) {
			error = bus_dmamap_load(sc->aac_buffer_dmat,
						cm->cm_datamap, cm->cm_data,
						cm->cm_datalen,
						aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				/* Mapping deferred; callback will fire later. */
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
				error = 0;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}
998
/*
 * Kernel thread: handle notification of one or more FIBs coming from
 * the controller (AIFs, firmware printfs), allocate additional FIBs on
 * demand, and run the command timeout scan.
 *
 * Holds aac_io_lock for its whole life except around aac_alloc_commands()
 * (which can sleep in contigmalloc).  Exits when AAC_AIFFLAGS_EXIT is
 * set by aac_detach(), waking the detach thread on sc->aac_dev.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		/* Sleep until poked or the periodic interval elapses. */
		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
					"aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			mtx_unlock(&sc->aac_io_lock);
			aac_alloc_commands(sc);
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/*
		 * Also check to see if the adapter has a command for us.
		 * NEW_COMM adapters deliver AIFs through aac_new_intr()
		 * instead, so skip the queue walk for them.
		 */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						     AAC_ADAP_NORM_RESP_QUEUE,
						     fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	/* Let aac_detach() know we are gone. */
	wakeup(sc->aac_dev);

	kproc_exit(0);
}
1099
1100 /*
1101 * Process completed commands.
1102 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
				    &fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		/* SenderData holds the command's index (set in aac_map_command_sg) */
		cm = sc->aac_commands + fib->Header.SenderData;
		/*
		 * NOTE(review): this NULL check is dead code — pointer
		 * arithmetic on the non-NULL sc->aac_commands base can
		 * never produce NULL.  A bounds check on SenderData would
		 * be the meaningful validation here.
		 */
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_uptime-cm->cm_timestamp));

		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}
1154
1155 /*
1156 * Handle a bio submitted from a disk device.
1157 */
1158 void
1159 aac_submit_bio(struct bio *bp)
1160 {
1161 struct aac_disk *ad;
1162 struct aac_softc *sc;
1163
1164 ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1165 sc = ad->ad_controller;
1166 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1167
1168 /* queue the BIO and try to get some work done */
1169 aac_enqueue_bio(sc, bp);
1170 aac_startio(sc);
1171 }
1172
1173 /*
1174 * Get a bio and build a command to go with it.
1175 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bp = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bp = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	cm->cm_data = (void *)bp->bio_data;
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_complete = aac_bio_complete;	/* run from aac_complete() */
	cm->cm_private = bp;
	cm->cm_timestamp = time_uptime;		/* for timeout detection */

	/* build the FIB */
	fib = cm->cm_fib;
	/* header only for now; the command body and s/g table grow it below */
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED	 |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bp->bio_disk->d_drv1;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		/* Raw I/O: single request format, 64-bit block number. */
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
		raw->ByteCount = bp->bio_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		/* Flags: 1 = read, 0 = write */
		if (bp->bio_cmd == BIO_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		/* Legacy 32-bit s/g container read/write commands. */
		fib->Header.Command = ContainerCommand;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bp->bio_pblkno;
			br->ByteCount = bp->bio_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;	/* no write-through guarantee requested */
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		/* 64-bit s/g variant; note these count sectors, not bytes. */
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	/* Put back whichever resources we did manage to acquire. */
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}
1298
1299 /*
1300 * Handle a bio-instigated command that has been completed.
1301 */
1302 static void
1303 aac_bio_complete(struct aac_command *cm)
1304 {
1305 struct aac_blockread_response *brr;
1306 struct aac_blockwrite_response *bwr;
1307 struct bio *bp;
1308 AAC_FSAStatus status;
1309
1310 /* fetch relevant status and then release the command */
1311 bp = (struct bio *)cm->cm_private;
1312 if (bp->bio_cmd == BIO_READ) {
1313 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1314 status = brr->Status;
1315 } else {
1316 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1317 status = bwr->Status;
1318 }
1319 aac_release_command(cm);
1320
1321 /* fix up the bio based on status */
1322 if (status == ST_OK) {
1323 bp->bio_resid = 0;
1324 } else {
1325 bp->bio_error = EIO;
1326 bp->bio_flags |= BIO_ERROR;
1327 /* pass an error string out to the disk layer */
1328 bp->bio_driver1 = aac_describe_code(aac_command_status_table,
1329 status);
1330 }
1331 aac_biodone(bp);
1332 }
1333
1334 /*
1335 * Submit a command to the controller, return when it completes.
1336 * XXX This is very dangerous! If the card has gone out to lunch, we could
1337 * be stuck here forever. At the same time, signals are not caught
1338 * because there is a risk that a signal could wakeup the sleep before
1339 * the card has a chance to complete the command. Since there is no way
1340 * to cancel a command that is in progress, we can't protect against the
1341 * card completing a command late and spamming the command and data
1342 * memory. So, we are held hostage until the command completes.
1343 */
1344 static int
1345 aac_wait_command(struct aac_command *cm)
1346 {
1347 struct aac_softc *sc;
1348 int error;
1349
1350 sc = cm->cm_sc;
1351 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1352
1353 /* Put the command on the ready queue and get things going */
1354 aac_enqueue_ready(cm);
1355 aac_startio(sc);
1356 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
1357 return(error);
1358 }
1359
1360 /*
1361 *Command Buffer Management
1362 */
1363
1364 /*
1365 * Allocate a command.
1366 */
1367 int
1368 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1369 {
1370 struct aac_command *cm;
1371
1372 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1373
1374 if ((cm = aac_dequeue_free(sc)) == NULL) {
1375 if (sc->total_fibs < sc->aac_max_fibs) {
1376 mtx_lock(&sc->aac_io_lock);
1377 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1378 mtx_unlock(&sc->aac_io_lock);
1379 wakeup(sc->aifthread);
1380 }
1381 return (EBUSY);
1382 }
1383
1384 *cmp = cm;
1385 return(0);
1386 }
1387
1388 /*
1389 * Release a command back to the freelist.
1390 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;	/* default submission queue */
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	/*
	 * Fire the first callback, if any, that was queued waiting for a
	 * free command to become available.
	 */
	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}
1426
1427 /*
1428 * Map helper for command/FIB allocation.
1429 */
1430 static void
1431 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1432 {
1433 uint64_t *fibphys;
1434
1435 fibphys = (uint64_t *)arg;
1436
1437 *fibphys = segs[0].ds_addr;
1438 }
1439
1440 /*
1441 * Allocate and initialize commands/FIBs for this adapter.
1442 */
1443 static int
1444 aac_alloc_commands(struct aac_softc *sc)
1445 {
1446 struct aac_command *cm;
1447 struct aac_fibmap *fm;
1448 uint64_t fibphys;
1449 int i, error;
1450
1451 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1452
1453 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1454 return (ENOMEM);
1455
1456 fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
1457 if (fm == NULL)
1458 return (ENOMEM);
1459
1460 /* allocate the FIBs in DMAable memory and load them */
1461 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1462 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1463 device_printf(sc->aac_dev,
1464 "Not enough contiguous memory available.\n");
1465 free(fm, M_AACBUF);
1466 return (ENOMEM);
1467 }
1468
1469 /* Ignore errors since this doesn't bounce */
1470 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1471 sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1472 aac_map_command_helper, &fibphys, 0);
1473
1474 /* initialize constant fields in the command structure */
1475 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1476 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1477 cm = sc->aac_commands + sc->total_fibs;
1478 fm->aac_commands = cm;
1479 cm->cm_sc = sc;
1480 cm->cm_fib = (struct aac_fib *)
1481 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1482 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1483 cm->cm_index = sc->total_fibs;
1484
1485 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1486 &cm->cm_datamap)) != 0)
1487 break;
1488 mtx_lock(&sc->aac_io_lock);
1489 aac_release_command(cm);
1490 sc->total_fibs++;
1491 mtx_unlock(&sc->aac_io_lock);
1492 }
1493
1494 if (i > 0) {
1495 mtx_lock(&sc->aac_io_lock);
1496 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1497 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1498 mtx_unlock(&sc->aac_io_lock);
1499 return (0);
1500 }
1501
1502 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1503 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1504 free(fm, M_AACBUF);
1505 return (ENOMEM);
1506 }
1507
1508 /*
1509 * Free FIBs owned by this adapter.
1510 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		/*
		 * NOTE(review): the post-decrement in the condition leaves
		 * total_fibs at -1 once it hits zero, and -1 is truthy on
		 * a subsequent outer-loop iteration — confirm a partially
		 * allocated block can only ever be the last one processed.
		 * Also: fm->aac_commands is assumed to point at the FIRST
		 * command of this block (see aac_alloc_commands()).
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACBUF);
	}
}
1536
1537 /*
1538 * Command-mapping helper function - populate this command's s/g table.
1539 */
1540 static void
1541 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1542 {
1543 struct aac_softc *sc;
1544 struct aac_command *cm;
1545 struct aac_fib *fib;
1546 int i;
1547
1548 cm = (struct aac_command *)arg;
1549 sc = cm->cm_sc;
1550 fib = cm->cm_fib;
1551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1552
1553 /* copy into the FIB */
1554 if (cm->cm_sgtable != NULL) {
1555 if (fib->Header.Command == RawIo) {
1556 struct aac_sg_tableraw *sg;
1557 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1558 sg->SgCount = nseg;
1559 for (i = 0; i < nseg; i++) {
1560 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1561 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1562 sg->SgEntryRaw[i].Next = 0;
1563 sg->SgEntryRaw[i].Prev = 0;
1564 sg->SgEntryRaw[i].Flags = 0;
1565 }
1566 /* update the FIB size for the s/g count */
1567 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1568 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1569 struct aac_sg_table *sg;
1570 sg = cm->cm_sgtable;
1571 sg->SgCount = nseg;
1572 for (i = 0; i < nseg; i++) {
1573 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1574 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1575 }
1576 /* update the FIB size for the s/g count */
1577 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1578 } else {
1579 struct aac_sg_table64 *sg;
1580 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1581 sg->SgCount = nseg;
1582 for (i = 0; i < nseg; i++) {
1583 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1584 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1585 }
1586 /* update the FIB size for the s/g count */
1587 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1588 }
1589 }
1590
1591 /* Fix up the address values in the FIB. Use the command array index
1592 * instead of a pointer since these fields are only 32 bits. Shift
1593 * the SenderFibAddress over to make room for the fast response bit
1594 * and for the AIF bit
1595 */
1596 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1597 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1598
1599 /* save a pointer to the command for speedy reverse-lookup */
1600 cm->cm_fib->Header.SenderData = cm->cm_index;
1601
1602 if (cm->cm_flags & AAC_CMD_DATAIN)
1603 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1604 BUS_DMASYNC_PREREAD);
1605 if (cm->cm_flags & AAC_CMD_DATAOUT)
1606 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1607 BUS_DMASYNC_PREWRITE);
1608 cm->cm_flags |= AAC_CMD_MAPPED;
1609
1610 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1611 int count = 10000000L;
1612 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1613 if (--count == 0) {
1614 aac_unmap_command(cm);
1615 sc->flags |= AAC_QUEUE_FRZN;
1616 aac_requeue_ready(cm);
1617 }
1618 DELAY(5); /* wait 5 usec. */
1619 }
1620 } else {
1621 /* Put the FIB on the outbound queue */
1622 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
1623 aac_unmap_command(cm);
1624 sc->flags |= AAC_QUEUE_FRZN;
1625 aac_requeue_ready(cm);
1626 }
1627 }
1628
1629 return;
1630 }
1631
1632 /*
1633 * Unmap a command from controller-visible space.
1634 */
1635 static void
1636 aac_unmap_command(struct aac_command *cm)
1637 {
1638 struct aac_softc *sc;
1639
1640 sc = cm->cm_sc;
1641 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1642
1643 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1644 return;
1645
1646 if (cm->cm_datalen != 0) {
1647 if (cm->cm_flags & AAC_CMD_DATAIN)
1648 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1649 BUS_DMASYNC_POSTREAD);
1650 if (cm->cm_flags & AAC_CMD_DATAOUT)
1651 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1652 BUS_DMASYNC_POSTWRITE);
1653
1654 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1655 }
1656 cm->cm_flags &= ~AAC_CMD_MAPPED;
1657 }
1658
1659 /*
1660 * Hardware Interface
1661 */
1662
1663 /*
1664 * Initialize the adapter.
1665 */
1666 static void
1667 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1668 {
1669 struct aac_softc *sc;
1670
1671 sc = (struct aac_softc *)arg;
1672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1673
1674 sc->aac_common_busaddr = segs[0].ds_addr;
1675 }
1676
static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic");
			return(ENXIO);
		}
		/* bound the busy-wait with AAC_BOOT_TIMEOUT seconds */
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
					   "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		/* subtracting 0x30 ('0') converts the ASCII digit to binary */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			     "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		/* mailbox 1: option bits; mailbox 2: required ATU window size */
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		     && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		/* new comm. also needs a send_command hook in the hw interface */
		if ((options & AAC_SUPPORTED_NEW_COMM)
		 && sc->aac_if.aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	/* The new comm. window may be larger than the BAR we first mapped. */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_res1)) {
		bus_release_resource(
			sc->aac_dev, SYS_RES_MEMORY,
			sc->aac_regs_rid1, sc->aac_regs_res1);
		sc->aac_regs_res1 = bus_alloc_resource(
			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid1,
			0ul, ~0ul, atu_size, RF_ACTIVE);
		if (sc->aac_regs_res1 == NULL) {
			/* fall back to the original window, without new comm. */
			sc->aac_regs_res1 = bus_alloc_resource_any(
				sc->aac_dev, SYS_RES_MEMORY,
				&sc->aac_regs_rid1, RF_ACTIVE);
			if (sc->aac_regs_res1 == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

		/* NARK uses the same window for both register sets */
		if (sc->aac_hwif == AAC_HWIF_NARK) {
			sc->aac_regs_res0 = sc->aac_regs_res1;
			sc->aac_regs_rid0 = sc->aac_regs_rid1;
			sc->aac_btag0 = sc->aac_btag1;
			sc->aac_bhandle0 = sc->aac_bhandle1;
		}
	}

	/* Read preferred settings */
	/* defaults first, in case GETCOMMPREF is unsupported */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite64))
		 / sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite))
		 / sizeof(struct aac_sg_entry);

	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	/* FIBs are allocated a page at a time (see aac_alloc_commands) */
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}
1836
static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init *ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	/* larger-than-default FIBs require the rev-4 init struct and raw I/O */
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	/* adapter-owned FIB pool inside the shared area */
	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	/* buffer the adapter uses for its printf-style diagnostics */
	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
 	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;	/* sectors -> bytes */
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this.  We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
	 */
	/* queue area must be aligned to AAC_QUEUE_ALIGN within ac_qbuf */
	qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
	qoffset &= ~(AAC_QUEUE_ALIGN - 1);
	sc->aac_queues =
	    (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
	ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;

	/*
	 * Seed every producer/consumer index with its queue's entry count
	 * (the indices never reach zero — a controller protocol quirk).
	 */
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_ADAP_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_ADAP_HIGH_RESP_ENTRIES;
	/* record where each queue's entry array lives */
	sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
		&sc->aac_queues->qt_HostNormCmdQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
		&sc->aac_queues->qt_HostHighCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
		&sc->aac_queues->qt_AdapNormCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
		&sc->aac_queues->qt_AdapHighCmdQueue[0];
	sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
		&sc->aac_queues->qt_HostNormRespQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
		&sc->aac_queues->qt_HostHighRespQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
		&sc->aac_queues->qt_AdapNormRespQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
		&sc->aac_queues->qt_AdapHighRespQueue[0];

	/*
	 * Do controller-type-specific initialisation
	 */
	/* clear the outbound doorbell on Rx/Rocket hardware */
	switch (sc->aac_hwif) {
	case AAC_HWIF_I960RX:
		AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
		break;
	case AAC_HWIF_RKT:
		AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
		break;
	default:
		break;
	}

	/*
	 * Give the init structure to the controller.
	 */
	if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
			     sc->aac_common_busaddr +
			     offsetof(struct aac_common, ac_init), 0, 0, 0,
			     NULL)) {
		device_printf(sc->aac_dev,
			      "error establishing init structure\n");
		error = EIO;
		goto out;
	}

	error = 0;
out:
	return(error);
}
1993
static int
aac_setup_intr(struct aac_softc *sc)
{
	/* Allocate a shareable IRQ for the adapter. */
	sc->aac_irq_rid = 0;
	if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ,
			   			  &sc->aac_irq_rid,
			   			  RF_SHAREABLE |
						  RF_ACTIVE)) == NULL) {
		device_printf(sc->aac_dev, "can't allocate interrupt\n");
		return (EINVAL);
	}
	/*
	 * NOTE(review): on bus_setup_intr() failure below, the IRQ
	 * resource allocated above is not released here — presumably the
	 * caller's failure path (detach) releases it; verify.
	 */
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		/* new comm. interface: ithread handler, no filter */
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
				   INTR_MPSAFE|INTR_TYPE_BIO, NULL,
				   aac_new_intr, sc, &sc->aac_intr)) {
			device_printf(sc->aac_dev, "can't set up interrupt\n");
			return (EINVAL);
		}
	} else {
		/* legacy interface: fast interrupt filter, no ithread handler */
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
				   INTR_TYPE_BIO, aac_filter, NULL,
				   sc, &sc->aac_intr)) {
			device_printf(sc->aac_dev,
				      "can't set up interrupt filter\n");
			return (EINVAL);
		}
	}
	return (0);
}
2023
2024 /*
2025 * Send a synchronous command to the controller and wait for a result.
2026 * Indicate if the controller completed the command with an error status.
2027 */
static int
aac_sync_command(struct aac_softc *sc, u_int32_t command,
		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
		 u_int32_t *sp)
{
	time_t then;
	u_int32_t status;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* populate the mailbox */
	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);

	/* ensure the sync command doorbell flag is cleared */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* then set it to signal the adapter */
	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);

	/* spin waiting for the command to complete */
	/* bounded by AAC_IMMEDIATE_TIMEOUT seconds; returns EIO on timeout */
	then = time_uptime;
	do {
		if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
			return(EIO);
		}
	} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));

	/* clear the completion flag */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* get the command status */
	/* mailbox 0 holds the adapter's completion status */
	status = AAC_GET_MAILBOX(sc, 0);
	if (sp != NULL)
		*sp = status;

	/* -1 flags an adapter-reported error; *sp carries the raw status */
	if (status != AAC_SRB_STS_SUCCESS)
		return (-1);
	return(0);
}
2068
2069 int
2070 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2071 struct aac_fib *fib, u_int16_t datasize)
2072 {
2073 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2074 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2075
2076 if (datasize > AAC_FIB_DATASIZE)
2077 return(EINVAL);
2078
2079 /*
2080 * Set up the sync FIB
2081 */
2082 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2083 AAC_FIBSTATE_INITIALISED |
2084 AAC_FIBSTATE_EMPTY;
2085 fib->Header.XferState |= xferstate;
2086 fib->Header.Command = command;
2087 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2088 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2089 fib->Header.SenderSize = sizeof(struct aac_fib);
2090 fib->Header.SenderFibAddress = 0; /* Not needed */
2091 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2092 offsetof(struct aac_common,
2093 ac_sync_fib);
2094
2095 /*
2096 * Give the FIB to the controller, wait for a response.
2097 */
2098 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2099 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2100 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2101 return(EIO);
2102 }
2103
2104 return (0);
2105 }
2106
2107 /*
2108 * Adapter-space FIB queue manipulation
2109 *
 * Note that the queue implementation here is a little funky; neither the PI nor
2111 * CI will ever be zero. This behaviour is a controller feature.
2112 */
/*
 * Per-queue parameters, indexed by the queue number passed to the
 * enqueue/dequeue routines: the number of entries in the queue and the
 * doorbell bit (0 = none) used to notify the other side of a change.
 */
static struct {
	int size;	/* number of entries in the queue */
	int notify;	/* doorbell bit for AAC_QNOTIFY, 0 = no notification */
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};
2126
2127 /*
2128 * Atomically insert an entry into the nominated queue, returns 0 on success or
2129 * EBUSY if the queue is full.
2130 *
2131 * Note: it would be more efficient to defer notifying the controller in
2132 * the case where we may be inserting several entries in rapid succession,
2133 * but implementing this usefully may be difficult (it would involve a
2134 * separate queue/notify interface).
2135 */
static int
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* size and adapter-space address the queue entry will advertise */
	fib_size = cm->cm_fib->Header.Size;
	fib_addr = cm->cm_fib->Header.ReceiverFibAddress;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? (see note above: indices never rest at zero) */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full; one slot is effectively left unused */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/*
	 * To avoid a race with its completion interrupt, place this command on
	 * the busy queue prior to advertising it to the controller.
	 */
	aac_enqueue_busy(cm);

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index; publishes the entry to the adapter */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}
2185
2186 /*
2187 * Atomically remove one entry from the nominated queue, returns 0 on
2188 * success or ENOENT if the queue is empty.
2189 */
static int
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
		struct aac_fib **fib_addr)
{
	u_int32_t pi, ci;
	u_int32_t fib_index;
	int error;
	int notify;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* check for queue empty */
	if (ci == pi) {
		error = ENOENT;
		goto out;
	}

	/* wrap the pi so the following test works */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* removing the last entry un-fills the queue; notify at the end */
	notify = 0;
	if (ci == pi + 1)
		notify++;

	/* wrap the queue? */
	if (ci >= aac_qinfo[queue].size)
		ci = 0;

	/* fetch the entry */
	*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;

	switch (queue) {
	case AAC_HOST_NORM_CMD_QUEUE:
	case AAC_HOST_HIGH_CMD_QUEUE:
		/*
		 * The aq_fib_addr is only 32 bits wide so it can't be counted
		 * on to hold an address. For AIF's, the adapter assumes
		 * that it's giving us an address into the array of AIF fibs.
		 * Therefore, we have to convert it to an index.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
			sizeof(struct aac_fib);
		*fib_addr = &sc->aac_common->ac_fibs[fib_index];
		break;

	case AAC_HOST_NORM_RESP_QUEUE:
	case AAC_HOST_HIGH_RESP_QUEUE:
	{
		struct aac_command *cm;

		/*
		 * As above, an index is used instead of an actual address.
		 * Gotta shift the index to account for the fast response
		 * bit. No other correction is needed since this value was
		 * originally provided by the driver via the SenderFibAddress
		 * field.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		/* command number was stored shifted left by 2 */
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index; frees the slot back to the producer */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
	error = 0;

out:
	return(error);
}
2282
2283 /*
 * Put our response to an Adapter Initiated FIB on the response queue
2285 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Tell the adapter where the FIB is.  The address echoed back is
	 * the one the adapter originally supplied in SenderFibAddress.
	 */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full; one slot is effectively left unused */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index; publishes the entry to the adapter */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}
2331
2332 /*
2333 * Check for commands that have been outstanding for a suspiciously long time,
2334 * and complain about them.
2335 */
2336 static void
2337 aac_timeout(struct aac_softc *sc)
2338 {
2339 struct aac_command *cm;
2340 time_t deadline;
2341 int timedout, code;
2342
2343 /*
2344 * Traverse the busy command list, bitch about late commands once
2345 * only.
2346 */
2347 timedout = 0;
2348 deadline = time_uptime - AAC_CMD_TIMEOUT;
2349 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2350 if ((cm->cm_timestamp < deadline)
2351 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
2352 cm->cm_flags |= AAC_CMD_TIMEDOUT;
2353 device_printf(sc->aac_dev,
2354 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
2355 cm, cm->cm_fib->Header.Command,
2356 (int)(time_uptime-cm->cm_timestamp));
2357 AAC_PRINT_FIB(sc, cm->cm_fib);
2358 timedout++;
2359 }
2360 }
2361
2362 if (timedout) {
2363 code = AAC_GET_FWSTATUS(sc);
2364 if (code != AAC_UP_AND_RUNNING) {
2365 device_printf(sc->aac_dev, "WARNING! Controller is no "
2366 "longer running! code= 0x%x\n", code);
2367 }
2368 }
2369 return;
2370 }
2371
2372 /*
2373 * Interface Function Vectors
2374 */
2375
2376 /*
2377 * Read the current firmware status word.
2378 */
2379 static int
2380 aac_sa_get_fwstatus(struct aac_softc *sc)
2381 {
2382 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2383
2384 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
2385 }
2386
2387 static int
2388 aac_rx_get_fwstatus(struct aac_softc *sc)
2389 {
2390 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2391
2392 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2393 AAC_RX_OMR0 : AAC_RX_FWSTATUS));
2394 }
2395
2396 static int
2397 aac_rkt_get_fwstatus(struct aac_softc *sc)
2398 {
2399 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2400
2401 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2402 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
2403 }
2404
2405 /*
2406 * Notify the controller of a change in a given queue
2407 */
2408
static void
aac_sa_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* raise the queue's bit in the doorbell-1 "set" register */
	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
}
2416
static void
aac_rx_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* raise the queue's bit in the inbound doorbell register */
	AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
}
2424
static void
aac_rkt_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* raise the queue's bit in the inbound doorbell register */
	AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
}
2432
2433 /*
2434 * Get the interrupt reason bits
2435 */
2436 static int
2437 aac_sa_get_istatus(struct aac_softc *sc)
2438 {
2439 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2440
2441 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
2442 }
2443
2444 static int
2445 aac_rx_get_istatus(struct aac_softc *sc)
2446 {
2447 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2448
2449 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
2450 }
2451
2452 static int
2453 aac_rkt_get_istatus(struct aac_softc *sc)
2454 {
2455 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2456
2457 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
2458 }
2459
2460 /*
2461 * Clear some interrupt reason bits
2462 */
static void
aac_sa_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* writing the mask to the doorbell-0 "clear" register acks the bits */
	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
}
2470
static void
aac_rx_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* writing the bits back to the outbound doorbell acknowledges them */
	AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
}
2478
static void
aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* writing the bits back to the outbound doorbell acknowledges them */
	AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
}
2486
2487 /*
2488 * Populate the mailbox and set the command word
2489 */
2490 static void
2491 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2492 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2493 {
2494 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2495
2496 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2499 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2500 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2501 }
2502
2503 static void
2504 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2505 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2506 {
2507 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2508
2509 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2512 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2513 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2514 }
2515
2516 static void
2517 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2518 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2519 {
2520 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2521
2522 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2524 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2525 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2526 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
2527 }
2528
2529 /*
2530 * Fetch the immediate command status word
2531 */
2532 static int
2533 aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2534 {
2535 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2536
2537 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2538 }
2539
2540 static int
2541 aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2542 {
2543 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2544
2545 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
2546 }
2547
2548 static int
2549 aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2550 {
2551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2552
2553 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
2554 }
2555
2556 /*
2557 * Set/clear interrupt masks
2558 */
static void
aac_sa_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	/*
	 * SA-class parts use separate set/clear mask registers: clearing
	 * the doorbell bits unmasks those sources, setting every mask
	 * bit disables all sources.
	 */
	if (enable) {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
	}
}
2570
2571 static void
2572 aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2573 {
2574 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2575
2576 if (enable) {
2577 if (sc->flags & AAC_FLAGS_NEW_COMM)
2578 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2579 else
2580 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2581 } else {
2582 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
2583 }
2584 }
2585
2586 static void
2587 aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2588 {
2589 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2590
2591 if (enable) {
2592 if (sc->flags & AAC_FLAGS_NEW_COMM)
2593 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2594 else
2595 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2596 } else {
2597 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
2598 }
2599 }
2600
2601 /*
2602 * New comm. interface: Send command functions
2603 */
static int
aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/* ask the adapter for a free inbound queue slot; retry once */
	index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		/* no slot; 0xffffffff is returned to the caller as -1 */
		return index;
	/* busy-queue first so the completion interrupt can find the command */
	aac_enqueue_busy(cm);
	device = index;
	/* slot layout: FIB address low 32 bits, high 32 bits, FIB size */
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	/* hand the filled slot back to the adapter */
	AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
	return 0;
}
2626
static int
aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/* ask the adapter for a free inbound queue slot; retry once */
	index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		/* no slot; 0xffffffff is returned to the caller as -1 */
		return index;
	/* busy-queue first so the completion interrupt can find the command */
	aac_enqueue_busy(cm);
	device = index;
	/* slot layout: FIB address low 32 bits, high 32 bits, FIB size */
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	/* hand the filled slot back to the adapter */
	AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
	return 0;
}
2649
2650 /*
2651 * New comm. interface: get, set outbound queue index
2652 */
2653 static int
2654 aac_rx_get_outb_queue(struct aac_softc *sc)
2655 {
2656 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2657
2658 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
2659 }
2660
2661 static int
2662 aac_rkt_get_outb_queue(struct aac_softc *sc)
2663 {
2664 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2665
2666 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
2667 }
2668
static void
aac_rx_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* return the consumed index to the adapter's outbound queue reg */
	AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
}
2676
static void
aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* return the consumed index to the adapter's outbound queue reg */
	AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
}
2684
2685 /*
2686 * Debugging and Diagnostics
2687 */
2688
2689 /*
2690 * Print some information about the controller.
2691 */
static void
aac_describe_controller(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_adapter_info *info;
	char *adapter_type = "Adaptec RAID controller";

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* one zeroed payload byte forms the RequestAdapterInfo request */
	fib->data[0] = 0;
	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	/* save the kernel revision structure for later use */
	info = (struct aac_adapter_info *)&fib->data[0];
	sc->aac_revision = info->KernelRevision;

	if (bootverbose) {
		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
		    "(%dMB cache, %dMB execution), %s\n",
		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
		    info->BufferMem / (1024 * 1024),
		    info->ExecutionMem / (1024 * 1024),
		    aac_describe_code(aac_battery_platform,
			info->batteryPlatform));

		device_printf(sc->aac_dev,
		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
		    info->KernelRevision.external.comp.major,
		    info->KernelRevision.external.comp.minor,
		    info->KernelRevision.external.comp.dash,
		    info->KernelRevision.buildNumber,
		    (u_int32_t)(info->SerialNumber & 0xffffff));

		/* %b decodes the option mask using the bit-name string */
		device_printf(sc->aac_dev, "Supported Options=%b\n",
			      sc->supported_options,
			      "\2"
			      "\1SNAPSHOT"
			      "\2CLUSTERS"
			      "\3WCACHE"
			      "\4DATA64"
			      "\5HOSTTIME"
			      "\6RAID50"
			      "\7WINDOW4GB"
			      "\10SCSIUPGD"
			      "\11SOFTERR"
			      "\12NORECOND"
			      "\13SGMAP64"
			      "\14ALARM"
			      "\15NONDASD"
			      "\16SCSIMGT"
			      "\17RAIDSCSI"
			      "\21ADPTINFO"
			      "\22NEWCOMM"
			      "\23ARRAY64BIT"
			      "\24HEATSENSOR");
	}

	/* the supplemental adapter info, if supported, has a nicer name */
	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
		fib->data[0] = 0;
		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
			device_printf(sc->aac_dev,
			    "RequestSupplementAdapterInfo failed\n");
		else
			adapter_type = ((struct aac_supplement_adapter_info *)
			    &fib->data[0])->AdapterTypeText;
	}
	device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
		adapter_type,
		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);
}
2775
2776 /*
2777 * Look up a text description of a numeric error code and return a pointer to
2778 * same.
2779 */
2780 static char *
2781 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2782 {
2783 int i;
2784
2785 for (i = 0; table[i].string != NULL; i++)
2786 if (table[i].code == code)
2787 return(table[i].string);
2788 return(table[i + 1].string);
2789 }
2790
2791 /*
2792 * Management Interface
2793 */
2794
static int
aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/* keep the device from detaching while the cdev is open */
	device_busy(sc->aac_dev);
	/* aac_cdevpriv_dtor drops the busy reference on final close */
	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);

	return 0;
}
2807
/*
 * Management interface ioctl handler.  Native FSACTL_* requests carry a
 * pointer to the real argument block, which is dereferenced once before
 * falling through to the shared handler for the Linux-compat variant.
 */
static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		/* copy out the statistics for one of the driver queues */
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			      sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
	break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust the underland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}
2912
2913 static int
2914 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2915 {
2916 struct aac_softc *sc;
2917 struct aac_fib_context *ctx;
2918 int revents;
2919
2920 sc = dev->si_drv1;
2921 revents = 0;
2922
2923 mtx_lock(&sc->aac_aifq_lock);
2924 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2925 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2926 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2927 revents |= poll_events & (POLLIN | POLLRDNORM);
2928 break;
2929 }
2930 }
2931 }
2932 mtx_unlock(&sc->aac_aifq_lock);
2933
2934 if (revents == 0) {
2935 if (poll_events & (POLLIN | POLLRDNORM))
2936 selrecord(td, &sc->rcv_select);
2937 }
2938
2939 return (revents);
2940 }
2941
static void
aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{

	switch (event->ev_type) {
	case AAC_EVENT_CMFREE:
		mtx_assert(&sc->aac_io_lock, MA_OWNED);
		/* arg points at the waiter's command pointer; try to fill it */
		if (aac_alloc_command(sc, (struct aac_command **)arg)) {
			/* still no free command; re-queue and try later */
			aac_add_event(sc, event);
			return;
		}
		free(event, M_AACBUF);
		/* the waiter sleeps on the address of its command pointer */
		wakeup(arg);
		break;
	default:
		break;
	}
}
2960
2961 /*
2962 * Send a FIB supplied from userspace
2963 */
static int
aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
{
	struct aac_command *cm;
	int size, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;

	/*
	 * Get a command; if none are free, register a CMFREE event and
	 * sleep on &cm until aac_ioctl_event() fills it and wakes us.
	 */
	mtx_lock(&sc->aac_io_lock);
	if (aac_alloc_command(sc, &cm)) {
		struct aac_event *event;

		event = malloc(sizeof(struct aac_event), M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			error = EBUSY;
			mtx_unlock(&sc->aac_io_lock);
			goto out;
		}
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aac_add_event(sc, event);
		msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
	}
	mtx_unlock(&sc->aac_io_lock);

	/*
	 * Fetch the FIB header, then re-copy to get data as well.
	 */
	if ((error = copyin(ufib, cm->cm_fib,
			    sizeof(struct aac_fib_header))) != 0)
		goto out;
	/*
	 * NOTE(review): the inbound size adds the header length to
	 * Header.Size, while the outbound path below uses Header.Size
	 * alone - confirm which convention userland follows here.
	 */
	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
		goto out;
	cm->cm_fib->Header.Size = size;
	cm->cm_timestamp = time_uptime;

	/*
	 * Pass the FIB to the controller, wait for it to complete.
	 */
	mtx_lock(&sc->aac_io_lock);
	error = aac_wait_command(cm);
	mtx_unlock(&sc->aac_io_lock);
	if (error != 0) {
		device_printf(sc->aac_dev,
			      "aac_wait_command return %d\n", error);
		goto out;
	}

	/*
	 * Copy the FIB and data back out to the caller.
	 */
	size = cm->cm_fib->Header.Size;
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	error = copyout(cm->cm_fib, ufib, size);

out:
	if (cm != NULL) {
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		mtx_unlock(&sc->aac_io_lock);
	}
	return(error);
}
3044
3045 /*
3046 * Send a passthrough FIB supplied from userspace
3047 */
3048 static int
3049 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
3050 {
3051 struct aac_command *cm;
3052 struct aac_event *event;
3053 struct aac_fib *fib;
3054 struct aac_srb *srbcmd, *user_srb;
3055 struct aac_sg_entry *sge;
3056 struct aac_sg_entry64 *sge64;
3057 void *srb_sg_address, *ureply;
3058 uint32_t fibsize, srb_sg_bytecount;
3059 int error, transfer_data;
3060
3061 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3062
3063 cm = NULL;
3064 transfer_data = 0;
3065 fibsize = 0;
3066 user_srb = (struct aac_srb *)arg;
3067
3068 mtx_lock(&sc->aac_io_lock);
3069 if (aac_alloc_command(sc, &cm)) {
3070 event = malloc(sizeof(struct aac_event), M_AACBUF,
3071 M_NOWAIT | M_ZERO);
3072 if (event == NULL) {
3073 error = EBUSY;
3074 mtx_unlock(&sc->aac_io_lock);
3075 goto out;
3076 }
3077 event->ev_type = AAC_EVENT_CMFREE;
3078 event->ev_callback = aac_ioctl_event;
3079 event->ev_arg = &cm;
3080 aac_add_event(sc, event);
3081 msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0);
3082 }
3083 mtx_unlock(&sc->aac_io_lock);
3084
3085 cm->cm_data = NULL;
3086 fib = cm->cm_fib;
3087 srbcmd = (struct aac_srb *)fib->data;
3088 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
3089 if (error != 0)
3090 goto out;
3091 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
3092 error = EINVAL;
3093 goto out;
3094 }
3095 error = copyin(user_srb, srbcmd, fibsize);
3096 if (error != 0)
3097 goto out;
3098 srbcmd->function = 0;
3099 srbcmd->retry_limit = 0;
3100 if (srbcmd->sg_map.SgCount > 1) {
3101 error = EINVAL;
3102 goto out;
3103 }
3104
3105 /* Retrieve correct SG entries. */
3106 if (fibsize == (sizeof(struct aac_srb) +
3107 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
3108 sge = srbcmd->sg_map.SgEntry;
3109 sge64 = NULL;
3110 srb_sg_bytecount = sge->SgByteCount;
3111 srb_sg_address = (void *)(uintptr_t)sge->SgAddress;
3112 }
3113 #ifdef __amd64__
3114 else if (fibsize == (sizeof(struct aac_srb) +
3115 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
3116 sge = NULL;
3117 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
3118 srb_sg_bytecount = sge64->SgByteCount;
3119 srb_sg_address = (void *)sge64->SgAddress;
3120 if (sge64->SgAddress > 0xffffffffull &&
3121 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
3122 error = EINVAL;
3123 goto out;
3124 }
3125 }
3126 #endif
3127 else {
3128 error = EINVAL;
3129 goto out;
3130 }
3131 ureply = (char *)arg + fibsize;
3132 srbcmd->data_len = srb_sg_bytecount;
3133 if (srbcmd->sg_map.SgCount == 1)
3134 transfer_data = 1;
3135
3136 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3137 if (transfer_data) {
3138 cm->cm_datalen = srb_sg_bytecount;
3139 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
3140 if (cm->cm_data == NULL) {
3141 error = ENOMEM;
3142 goto out;
3143 }
3144 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
3145 cm->cm_flags |= AAC_CMD_DATAIN;
3146 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
3147 cm->cm_flags |= AAC_CMD_DATAOUT;
3148 error = copyin(srb_sg_address, cm->cm_data,
3149 cm->cm_datalen);
3150 if (error != 0)
3151 goto out;
3152 }
3153 }
3154
3155 fib->Header.Size = sizeof(struct aac_fib_header) +
3156 sizeof(struct aac_srb);
3157 fib->Header.XferState =
3158 AAC_FIBSTATE_HOSTOWNED |
3159 AAC_FIBSTATE_INITIALISED |
3160 AAC_FIBSTATE_EMPTY |
3161 AAC_FIBSTATE_FROMHOST |
3162 AAC_FIBSTATE_REXPECTED |
3163 AAC_FIBSTATE_NORM |
3164 AAC_FIBSTATE_ASYNC |
3165 AAC_FIBSTATE_FAST_RESPONSE;
3166 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
3167 ScsiPortCommandU64 : ScsiPortCommand;
3168
3169 mtx_lock(&sc->aac_io_lock);
3170 aac_wait_command(cm);
3171 mtx_unlock(&sc->aac_io_lock);
3172
3173 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
3174 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
3175 if (error != 0)
3176 goto out;
3177 }
3178 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
3179 out:
3180 if (cm != NULL) {
3181 if (cm->cm_data != NULL)
3182 free(cm->cm_data, M_AACBUF);
3183 mtx_lock(&sc->aac_io_lock);
3184 aac_release_command(cm);
3185 mtx_unlock(&sc->aac_io_lock);
3186 }
3187 return(error);
3188 }
3189
3190 /*
3191 * cdevpriv interface private destructor.
3192 */
3193 static void
3194 aac_cdevpriv_dtor(void *arg)
3195 {
3196 struct aac_softc *sc;
3197
3198 sc = arg;
3199 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3200 mtx_lock(&Giant);
3201 device_unbusy(sc->aac_dev);
3202 mtx_unlock(&Giant);
3203 }
3204
3205 /*
3206 * Handle an AIF sent to us by the controller; queue it for later reference.
3207 * If the queue fills up, then drop the older entries.
3208 */
3209 static void
3210 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3211 {
3212 struct aac_aif_command *aif;
3213 struct aac_container *co, *co_next;
3214 struct aac_fib_context *ctx;
3215 struct aac_mntinforesp *mir;
3216 int next, current, found;
3217 int count = 0, added = 0, i = 0;
3218 uint32_t channel;
3219
3220 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3221
3222 aif = (struct aac_aif_command*)&fib->data[0];
3223 aac_print_aif(sc, aif);
3224
3225 /* Is it an event that we should care about? */
3226 switch (aif->command) {
3227 case AifCmdEventNotify:
3228 switch (aif->data.EN.type) {
3229 case AifEnAddContainer:
3230 case AifEnDeleteContainer:
3231 /*
3232 * A container was added or deleted, but the message
3233 * doesn't tell us anything else! Re-enumerate the
3234 * containers and sort things out.
3235 */
3236 aac_alloc_sync_fib(sc, &fib);
3237 do {
3238 /*
3239 * Ask the controller for its containers one at
3240 * a time.
3241 * XXX What if the controller's list changes
3242 * midway through this enumaration?
3243 * XXX This should be done async.
3244 */
3245 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
3246 continue;
3247 if (i == 0)
3248 count = mir->MntRespCount;
3249 /*
3250 * Check the container against our list.
3251 * co->co_found was already set to 0 in a
3252 * previous run.
3253 */
3254 if ((mir->Status == ST_OK) &&
3255 (mir->MntTable[0].VolType != CT_NONE)) {
3256 found = 0;
3257 TAILQ_FOREACH(co,
3258 &sc->aac_container_tqh,
3259 co_link) {
3260 if (co->co_mntobj.ObjectId ==
3261 mir->MntTable[0].ObjectId) {
3262 co->co_found = 1;
3263 found = 1;
3264 break;
3265 }
3266 }
3267 /*
3268 * If the container matched, continue
3269 * in the list.
3270 */
3271 if (found) {
3272 i++;
3273 continue;
3274 }
3275
3276 /*
3277 * This is a new container. Do all the
3278 * appropriate things to set it up.
3279 */
3280 aac_add_container(sc, mir, 1);
3281 added = 1;
3282 }
3283 i++;
3284 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3285 aac_release_sync_fib(sc);
3286
3287 /*
3288 * Go through our list of containers and see which ones
3289 * were not marked 'found'. Since the controller didn't
3290 * list them they must have been deleted. Do the
3291 * appropriate steps to destroy the device. Also reset
3292 * the co->co_found field.
3293 */
3294 co = TAILQ_FIRST(&sc->aac_container_tqh);
3295 while (co != NULL) {
3296 if (co->co_found == 0) {
3297 mtx_unlock(&sc->aac_io_lock);
3298 mtx_lock(&Giant);
3299 device_delete_child(sc->aac_dev,
3300 co->co_disk);
3301 mtx_unlock(&Giant);
3302 mtx_lock(&sc->aac_io_lock);
3303 co_next = TAILQ_NEXT(co, co_link);
3304 mtx_lock(&sc->aac_container_lock);
3305 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3306 co_link);
3307 mtx_unlock(&sc->aac_container_lock);
3308 free(co, M_AACBUF);
3309 co = co_next;
3310 } else {
3311 co->co_found = 0;
3312 co = TAILQ_NEXT(co, co_link);
3313 }
3314 }
3315
3316 /* Attach the newly created containers */
3317 if (added) {
3318 mtx_unlock(&sc->aac_io_lock);
3319 mtx_lock(&Giant);
3320 bus_generic_attach(sc->aac_dev);
3321 mtx_unlock(&Giant);
3322 mtx_lock(&sc->aac_io_lock);
3323 }
3324
3325 break;
3326
3327 case AifEnEnclosureManagement:
3328 switch (aif->data.EN.data.EEE.eventType) {
3329 case AIF_EM_DRIVE_INSERTION:
3330 case AIF_EM_DRIVE_REMOVAL:
3331 channel = aif->data.EN.data.EEE.unitID;
3332 if (sc->cam_rescan_cb != NULL)
3333 sc->cam_rescan_cb(sc,
3334 (channel >> 24) & 0xF,
3335 (channel & 0xFFFF));
3336 break;
3337 }
3338 break;
3339
3340 case AifEnAddJBOD:
3341 case AifEnDeleteJBOD:
3342 channel = aif->data.EN.data.ECE.container;
3343 if (sc->cam_rescan_cb != NULL)
3344 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
3345 AAC_CAM_TARGET_WILDCARD);
3346 break;
3347
3348 default:
3349 break;
3350 }
3351
3352 default:
3353 break;
3354 }
3355
3356 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3357 mtx_lock(&sc->aac_aifq_lock);
3358 current = sc->aifq_idx;
3359 next = (current + 1) % AAC_AIFQ_LENGTH;
3360 if (next == 0)
3361 sc->aifq_filled = 1;
3362 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3363 /* modify AIF contexts */
3364 if (sc->aifq_filled) {
3365 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3366 if (next == ctx->ctx_idx)
3367 ctx->ctx_wrap = 1;
3368 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3369 ctx->ctx_idx = next;
3370 }
3371 }
3372 sc->aifq_idx = next;
3373 /* On the off chance that someone is sleeping for an aif... */
3374 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3375 wakeup(sc->aac_aifq);
3376 /* Wakeup any poll()ers */
3377 selwakeuppri(&sc->rcv_select, PRIBIO);
3378 mtx_unlock(&sc->aac_aifq_lock);
3379
3380 return;
3381 }
3382
3383 /*
3384 * Return the Revision of the driver to userspace and check to see if the
3385 * userspace app is possibly compatible. This is extremely bogus since
3386 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3387 * returning what the card reported.
3388 */
3389 static int
3390 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3391 {
3392 struct aac_rev_check rev_check;
3393 struct aac_rev_check_resp rev_check_resp;
3394 int error = 0;
3395
3396 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3397
3398 /*
3399 * Copyin the revision struct from userspace
3400 */
3401 if ((error = copyin(udata, (caddr_t)&rev_check,
3402 sizeof(struct aac_rev_check))) != 0) {
3403 return error;
3404 }
3405
3406 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3407 rev_check.callingRevision.buildNumber);
3408
3409 /*
3410 * Doctor up the response struct.
3411 */
3412 rev_check_resp.possiblyCompatible = 1;
3413 rev_check_resp.adapterSWRevision.external.comp.major =
3414 AAC_DRIVER_MAJOR_VERSION;
3415 rev_check_resp.adapterSWRevision.external.comp.minor =
3416 AAC_DRIVER_MINOR_VERSION;
3417 rev_check_resp.adapterSWRevision.external.comp.type =
3418 AAC_DRIVER_TYPE;
3419 rev_check_resp.adapterSWRevision.external.comp.dash =
3420 AAC_DRIVER_BUGFIX_LEVEL;
3421 rev_check_resp.adapterSWRevision.buildNumber =
3422 AAC_DRIVER_BUILD;
3423
3424 return(copyout((caddr_t)&rev_check_resp, udata,
3425 sizeof(struct aac_rev_check_resp)));
3426 }
3427
3428 /*
3429 * Pass the fib context to the caller
3430 */
3431 static int
3432 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3433 {
3434 struct aac_fib_context *fibctx, *ctx;
3435 int error = 0;
3436
3437 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3438
3439 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
3440 if (fibctx == NULL)
3441 return (ENOMEM);
3442
3443 mtx_lock(&sc->aac_aifq_lock);
3444 /* all elements are already 0, add to queue */
3445 if (sc->fibctx == NULL)
3446 sc->fibctx = fibctx;
3447 else {
3448 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3449 ;
3450 ctx->next = fibctx;
3451 fibctx->prev = ctx;
3452 }
3453
3454 /* evaluate unique value */
3455 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3456 ctx = sc->fibctx;
3457 while (ctx != fibctx) {
3458 if (ctx->unique == fibctx->unique) {
3459 fibctx->unique++;
3460 ctx = sc->fibctx;
3461 } else {
3462 ctx = ctx->next;
3463 }
3464 }
3465 mtx_unlock(&sc->aac_aifq_lock);
3466
3467 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3468 if (error)
3469 aac_close_aif(sc, (caddr_t)ctx);
3470 return error;
3471 }
3472
3473 /*
3474 * Close the caller's fib context
3475 */
3476 static int
3477 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3478 {
3479 struct aac_fib_context *ctx;
3480
3481 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3482
3483 mtx_lock(&sc->aac_aifq_lock);
3484 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3485 if (ctx->unique == *(uint32_t *)&arg) {
3486 if (ctx == sc->fibctx)
3487 sc->fibctx = NULL;
3488 else {
3489 ctx->prev->next = ctx->next;
3490 if (ctx->next)
3491 ctx->next->prev = ctx->prev;
3492 }
3493 break;
3494 }
3495 }
3496 mtx_unlock(&sc->aac_aifq_lock);
3497 if (ctx)
3498 free(ctx, M_AACBUF);
3499
3500 return 0;
3501 }
3502
3503 /*
3504 * Pass the caller the next AIF in their queue
3505 */
3506 static int
3507 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3508 {
3509 struct get_adapter_fib_ioctl agf;
3510 struct aac_fib_context *ctx;
3511 int error;
3512
3513 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3514
3515 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3516 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3517 if (agf.AdapterFibContext == ctx->unique)
3518 break;
3519 }
3520 if (!ctx)
3521 return (EFAULT);
3522
3523 error = aac_return_aif(sc, ctx, agf.AifFib);
3524 if (error == EAGAIN && agf.Wait) {
3525 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3526 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3527 while (error == EAGAIN) {
3528 error = tsleep(sc->aac_aifq, PRIBIO |
3529 PCATCH, "aacaif", 0);
3530 if (error == 0)
3531 error = aac_return_aif(sc, ctx, agf.AifFib);
3532 }
3533 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3534 }
3535 }
3536 return(error);
3537 }
3538
3539 /*
3540 * Hand the next AIF off the top of the queue out to userspace.
3541 */
3542 static int
3543 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3544 {
3545 int current, error;
3546
3547 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3548
3549 mtx_lock(&sc->aac_aifq_lock);
3550 current = ctx->ctx_idx;
3551 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3552 /* empty */
3553 mtx_unlock(&sc->aac_aifq_lock);
3554 return (EAGAIN);
3555 }
3556 error =
3557 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3558 if (error)
3559 device_printf(sc->aac_dev,
3560 "aac_return_aif: copyout returned %d\n", error);
3561 else {
3562 ctx->ctx_wrap = 0;
3563 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3564 }
3565 mtx_unlock(&sc->aac_aifq_lock);
3566 return(error);
3567 }
3568
3569 static int
3570 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3571 {
3572 struct aac_pci_info {
3573 u_int32_t bus;
3574 u_int32_t slot;
3575 } pciinf;
3576 int error;
3577
3578 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3579
3580 pciinf.bus = pci_get_bus(sc->aac_dev);
3581 pciinf.slot = pci_get_slot(sc->aac_dev);
3582
3583 error = copyout((caddr_t)&pciinf, uptr,
3584 sizeof(struct aac_pci_info));
3585
3586 return (error);
3587 }
3588
3589 static int
3590 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3591 {
3592 struct aac_features f;
3593 int error;
3594
3595 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3596
3597 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3598 return (error);
3599
3600 /*
3601 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3602 * ALL zero in the featuresState, the driver will return the current
3603 * state of all the supported features, the data field will not be
3604 * valid.
3605 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3606 * a specific bit set in the featuresState, the driver will return the
3607 * current state of this specific feature and whatever data that are
3608 * associated with the feature in the data field or perform whatever
3609 * action needed indicates in the data field.
3610 */
3611 if (f.feat.fValue == 0) {
3612 f.feat.fBits.largeLBA =
3613 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3614 /* TODO: In the future, add other features state here as well */
3615 } else {
3616 if (f.feat.fBits.largeLBA)
3617 f.feat.fBits.largeLBA =
3618 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3619 /* TODO: Add other features state and data in the future */
3620 }
3621
3622 error = copyout(&f, uptr, sizeof (f));
3623 return (error);
3624 }
3625
3626 /*
3627 * Give the userland some information about the container. The AAC arch
3628 * expects the driver to be a SCSI passthrough type driver, so it expects
3629 * the containers to have b:t:l numbers. Fake it.
3630 */
3631 static int
3632 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3633 {
3634 struct aac_query_disk query_disk;
3635 struct aac_container *co;
3636 struct aac_disk *disk;
3637 int error, id;
3638
3639 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3640
3641 disk = NULL;
3642
3643 error = copyin(uptr, (caddr_t)&query_disk,
3644 sizeof(struct aac_query_disk));
3645 if (error)
3646 return (error);
3647
3648 id = query_disk.ContainerNumber;
3649 if (id == -1)
3650 return (EINVAL);
3651
3652 mtx_lock(&sc->aac_container_lock);
3653 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3654 if (co->co_mntobj.ObjectId == id)
3655 break;
3656 }
3657
3658 if (co == NULL) {
3659 query_disk.Valid = 0;
3660 query_disk.Locked = 0;
3661 query_disk.Deleted = 1; /* XXX is this right? */
3662 } else {
3663 disk = device_get_softc(co->co_disk);
3664 query_disk.Valid = 1;
3665 query_disk.Locked =
3666 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3667 query_disk.Deleted = 0;
3668 query_disk.Bus = device_get_unit(sc->aac_dev);
3669 query_disk.Target = disk->unit;
3670 query_disk.Lun = 0;
3671 query_disk.UnMapped = 0;
3672 sprintf(&query_disk.diskDeviceName[0], "%s%d",
3673 disk->ad_disk->d_name, disk->ad_disk->d_unit);
3674 }
3675 mtx_unlock(&sc->aac_container_lock);
3676
3677 error = copyout((caddr_t)&query_disk, uptr,
3678 sizeof(struct aac_query_disk));
3679
3680 return (error);
3681 }
3682
3683 static void
3684 aac_get_bus_info(struct aac_softc *sc)
3685 {
3686 struct aac_fib *fib;
3687 struct aac_ctcfg *c_cmd;
3688 struct aac_ctcfg_resp *c_resp;
3689 struct aac_vmioctl *vmi;
3690 struct aac_vmi_businf_resp *vmi_resp;
3691 struct aac_getbusinf businfo;
3692 struct aac_sim *caminf;
3693 device_t child;
3694 int i, found, error;
3695
3696 mtx_lock(&sc->aac_io_lock);
3697 aac_alloc_sync_fib(sc, &fib);
3698 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3699 bzero(c_cmd, sizeof(struct aac_ctcfg));
3700
3701 c_cmd->Command = VM_ContainerConfig;
3702 c_cmd->cmd = CT_GET_SCSI_METHOD;
3703 c_cmd->param = 0;
3704
3705 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3706 sizeof(struct aac_ctcfg));
3707 if (error) {
3708 device_printf(sc->aac_dev, "Error %d sending "
3709 "VM_ContainerConfig command\n", error);
3710 aac_release_sync_fib(sc);
3711 mtx_unlock(&sc->aac_io_lock);
3712 return;
3713 }
3714
3715 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3716 if (c_resp->Status != ST_OK) {
3717 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3718 c_resp->Status);
3719 aac_release_sync_fib(sc);
3720 mtx_unlock(&sc->aac_io_lock);
3721 return;
3722 }
3723
3724 sc->scsi_method_id = c_resp->param;
3725
3726 vmi = (struct aac_vmioctl *)&fib->data[0];
3727 bzero(vmi, sizeof(struct aac_vmioctl));
3728
3729 vmi->Command = VM_Ioctl;
3730 vmi->ObjType = FT_DRIVE;
3731 vmi->MethId = sc->scsi_method_id;
3732 vmi->ObjId = 0;
3733 vmi->IoctlCmd = GetBusInfo;
3734
3735 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3736 sizeof(struct aac_vmi_businf_resp));
3737 if (error) {
3738 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3739 error);
3740 aac_release_sync_fib(sc);
3741 mtx_unlock(&sc->aac_io_lock);
3742 return;
3743 }
3744
3745 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3746 if (vmi_resp->Status != ST_OK) {
3747 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3748 vmi_resp->Status);
3749 aac_release_sync_fib(sc);
3750 mtx_unlock(&sc->aac_io_lock);
3751 return;
3752 }
3753
3754 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3755 aac_release_sync_fib(sc);
3756 mtx_unlock(&sc->aac_io_lock);
3757
3758 found = 0;
3759 for (i = 0; i < businfo.BusCount; i++) {
3760 if (businfo.BusValid[i] != AAC_BUS_VALID)
3761 continue;
3762
3763 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3764 M_AACBUF, M_NOWAIT | M_ZERO);
3765 if (caminf == NULL) {
3766 device_printf(sc->aac_dev,
3767 "No memory to add passthrough bus %d\n", i);
3768 break;
3769 };
3770
3771 child = device_add_child(sc->aac_dev, "aacp", -1);
3772 if (child == NULL) {
3773 device_printf(sc->aac_dev,
3774 "device_add_child failed for passthrough bus %d\n",
3775 i);
3776 free(caminf, M_AACBUF);
3777 break;
3778 }
3779
3780 caminf->TargetsPerBus = businfo.TargetsPerBus;
3781 caminf->BusNumber = i;
3782 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3783 caminf->aac_sc = sc;
3784 caminf->sim_dev = child;
3785
3786 device_set_ivars(child, caminf);
3787 device_set_desc(child, "SCSI Passthrough Bus");
3788 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3789
3790 found = 1;
3791 }
3792
3793 if (found)
3794 bus_generic_attach(sc->aac_dev);
3795
3796 return;
3797 }
Cache object: 1a1590949ca7c4631a6d389265aaae6c
|