FreeBSD/Linux Kernel Cross Reference
sys/dev/aac/aac.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2001 Scott Long
6 * Copyright (c) 2000 BSDi
7 * Copyright (c) 2001 Adaptec, Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 /*
36 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
37 */
38 #define AAC_DRIVERNAME "aac"
39
40 #include "opt_aac.h"
41
42 /* #include <stddef.h> */
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/kthread.h>
48 #include <sys/proc.h>
49 #include <sys/sysctl.h>
50 #include <sys/sysent.h>
51 #include <sys/poll.h>
52 #include <sys/ioccom.h>
53
54 #include <sys/bus.h>
55 #include <sys/conf.h>
56 #include <sys/signalvar.h>
57 #include <sys/time.h>
58 #include <sys/eventhandler.h>
59 #include <sys/rman.h>
60
61 #include <machine/bus.h>
62 #include <machine/resource.h>
63
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66
67 #include <dev/aac/aacreg.h>
68 #include <sys/aac_ioctl.h>
69 #include <dev/aac/aacvar.h>
70 #include <dev/aac/aac_tables.h>
71
72 static void aac_startup(void *arg);
73 static void aac_add_container(struct aac_softc *sc,
74 struct aac_mntinforesp *mir, int f);
75 static void aac_get_bus_info(struct aac_softc *sc);
76 static void aac_daemon(void *arg);
77
78 /* Command Processing */
79 static void aac_timeout(struct aac_softc *sc);
80 static void aac_complete(void *context, int pending);
81 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
82 static void aac_bio_complete(struct aac_command *cm);
83 static int aac_wait_command(struct aac_command *cm);
84 static void aac_command_thread(struct aac_softc *sc);
85
86 /* Command Buffer Management */
87 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
88 int nseg, int error);
89 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
90 int nseg, int error);
91 static int aac_alloc_commands(struct aac_softc *sc);
92 static void aac_free_commands(struct aac_softc *sc);
93 static void aac_unmap_command(struct aac_command *cm);
94
95 /* Hardware Interface */
96 static int aac_alloc(struct aac_softc *sc);
97 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
98 int error);
99 static int aac_check_firmware(struct aac_softc *sc);
100 static int aac_init(struct aac_softc *sc);
101 static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
102 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
103 u_int32_t arg3, u_int32_t *sp);
104 static int aac_setup_intr(struct aac_softc *sc);
105 static int aac_enqueue_fib(struct aac_softc *sc, int queue,
106 struct aac_command *cm);
107 static int aac_dequeue_fib(struct aac_softc *sc, int queue,
108 u_int32_t *fib_size, struct aac_fib **fib_addr);
109 static int aac_enqueue_response(struct aac_softc *sc, int queue,
110 struct aac_fib *fib);
111
112 /* StrongARM interface */
113 static int aac_sa_get_fwstatus(struct aac_softc *sc);
114 static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
115 static int aac_sa_get_istatus(struct aac_softc *sc);
116 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
117 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
118 u_int32_t arg0, u_int32_t arg1,
119 u_int32_t arg2, u_int32_t arg3);
120 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
121 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);
122
123 const struct aac_interface aac_sa_interface = {
124 aac_sa_get_fwstatus,
125 aac_sa_qnotify,
126 aac_sa_get_istatus,
127 aac_sa_clear_istatus,
128 aac_sa_set_mailbox,
129 aac_sa_get_mailbox,
130 aac_sa_set_interrupts,
131 NULL, NULL, NULL
132 };
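/*
 * The three NULL slots above are the new-comm entry points that the Rx and
 * Rkt tables below fill with their send_command/get_outb_queue/set_outb_queue
 * methods; the StrongARM hardware has no equivalent, and aac_check_firmware
 * only sets AAC_FLAGS_NEW_COMM when aif_send_command is non-NULL.
 */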
133
134 /* i960Rx interface */
135 static int aac_rx_get_fwstatus(struct aac_softc *sc);
136 static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
137 static int aac_rx_get_istatus(struct aac_softc *sc);
138 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
139 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 u_int32_t arg0, u_int32_t arg1,
141 u_int32_t arg2, u_int32_t arg3);
142 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
143 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
144 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
145 static int aac_rx_get_outb_queue(struct aac_softc *sc);
146 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);
147
148 const struct aac_interface aac_rx_interface = {
149 aac_rx_get_fwstatus,
150 aac_rx_qnotify,
151 aac_rx_get_istatus,
152 aac_rx_clear_istatus,
153 aac_rx_set_mailbox,
154 aac_rx_get_mailbox,
155 aac_rx_set_interrupts,
156 aac_rx_send_command,
157 aac_rx_get_outb_queue,
158 aac_rx_set_outb_queue
159 };
160
161 /* Rocket/MIPS interface */
162 static int aac_rkt_get_fwstatus(struct aac_softc *sc);
163 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
164 static int aac_rkt_get_istatus(struct aac_softc *sc);
165 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
166 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
167 u_int32_t arg0, u_int32_t arg1,
168 u_int32_t arg2, u_int32_t arg3);
169 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
170 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
171 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
172 static int aac_rkt_get_outb_queue(struct aac_softc *sc);
173 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);
174
175 const struct aac_interface aac_rkt_interface = {
176 aac_rkt_get_fwstatus,
177 aac_rkt_qnotify,
178 aac_rkt_get_istatus,
179 aac_rkt_clear_istatus,
180 aac_rkt_set_mailbox,
181 aac_rkt_get_mailbox,
182 aac_rkt_set_interrupts,
183 aac_rkt_send_command,
184 aac_rkt_get_outb_queue,
185 aac_rkt_set_outb_queue
186 };
187
188 /* Debugging and Diagnostics */
189 static void aac_describe_controller(struct aac_softc *sc);
190 static const char *aac_describe_code(const struct aac_code_lookup *table,
191 u_int32_t code);
192
193 /* Management Interface */
194 static d_open_t aac_open;
195 static d_ioctl_t aac_ioctl;
196 static d_poll_t aac_poll;
197 static void aac_cdevpriv_dtor(void *arg);
198 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
199 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
200 static void aac_handle_aif(struct aac_softc *sc,
201 struct aac_fib *fib);
202 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
203 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
204 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
205 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
206 static int aac_return_aif(struct aac_softc *sc,
207 struct aac_fib_context *ctx, caddr_t uptr);
208 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
209 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
210 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
211 static void aac_ioctl_event(struct aac_softc *sc,
212 struct aac_event *event, void *arg);
213 static struct aac_mntinforesp *
214 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);
215
216 static struct cdevsw aac_cdevsw = {
217 .d_version = D_VERSION,
218 .d_flags = 0,
219 .d_open = aac_open,
220 .d_ioctl = aac_ioctl,
221 .d_poll = aac_poll,
222 .d_name = "aac",
223 };
224
225 static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
226
227 /* sysctl node */
228 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
229 "AAC driver parameters");
230
231 /*
232 * Device Interface
233 */
234
235 /*
236 * Initialize the controller and softc
237 */
238 int
239 aac_attach(struct aac_softc *sc)
240 {
241 int error, unit;
242
243 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
244
245 /*
246 * Initialize per-controller queues.
247 */
248 aac_initq_free(sc);
249 aac_initq_ready(sc);
250 aac_initq_busy(sc);
251 aac_initq_bio(sc);
252
253 /*
254 * Initialize command-completion task.
255 */
256 TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);
257
258 /* mark controller as suspended until we get ourselves organised */
259 sc->aac_state |= AAC_STATE_SUSPEND;
260
261 /*
262 * Check that the firmware on the card is supported.
263 */
264 if ((error = aac_check_firmware(sc)) != 0)
265 return(error);
266
267 /*
268 * Initialize locks
269 */
270 mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
271 mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
272 mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
273 TAILQ_INIT(&sc->aac_container_tqh);
274 TAILQ_INIT(&sc->aac_ev_cmfree);
275
276 /* Initialize the clock daemon callout. */
277 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
278
279 /*
280 * Initialize the adapter.
281 */
282 if ((error = aac_alloc(sc)) != 0)
283 return(error);
284 if ((error = aac_init(sc)) != 0)
285 return(error);
286
287 /*
288 * Allocate and connect our interrupt.
289 */
290 if ((error = aac_setup_intr(sc)) != 0)
291 return(error);
292
293 /*
294 * Print a little information about the controller.
295 */
296 aac_describe_controller(sc);
297
298 /*
299 * Add sysctls.
300 */
301 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
302 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
303 OID_AUTO, "firmware_build", CTLFLAG_RD,
304 &sc->aac_revision.buildNumber, 0,
305 "firmware build number");
306
307 /*
308 * Register to probe our containers later.
309 */
310 sc->aac_ich.ich_func = aac_startup;
311 sc->aac_ich.ich_arg = sc;
312 if (config_intrhook_establish(&sc->aac_ich) != 0) {
313 device_printf(sc->aac_dev,
314 "can't establish configuration hook\n");
315 return(ENXIO);
316 }
317
318 /*
319 * Make the control device.
320 */
321 unit = device_get_unit(sc->aac_dev);
322 sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
323 0640, "aac%d", unit);
324 (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
325 (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
326 sc->aac_dev_t->si_drv1 = sc;
327
328 /* Create the AIF thread */
329 if (kproc_create((void(*)(void *))aac_command_thread, sc,
330 &sc->aifthread, 0, 0, "aac%daif", unit))
331 panic("Could not create AIF thread");
332
333 /* Register the shutdown method to only be called post-dump */
334 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
335 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
336 device_printf(sc->aac_dev,
337 "shutdown event registration failed\n");
338
339 /* Register with CAM for the non-DASD devices */
340 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
341 TAILQ_INIT(&sc->aac_sim_tqh);
342 aac_get_bus_info(sc);
343 }
344
345 mtx_lock(&sc->aac_io_lock);
346 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
347 mtx_unlock(&sc->aac_io_lock);
348
349 return(0);
350 }
351
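/*
 * Periodic timekeeping callout: aac_attach arms it to fire 60 seconds after
 * attach, and each run reschedules itself for 30 minutes later, pushing the
 * current host time to the controller in a SendHostTime synchronous FIB.
 */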
352 static void
353 aac_daemon(void *arg)
354 {
355 struct timeval tv;
356 struct aac_softc *sc;
357 struct aac_fib *fib;
358
359 sc = arg;
360 mtx_assert(&sc->aac_io_lock, MA_OWNED);
361
362 if (callout_pending(&sc->aac_daemontime) ||
363 callout_active(&sc->aac_daemontime) == 0)
364 return;
365 getmicrotime(&tv);
366 aac_alloc_sync_fib(sc, &fib);
367 *(uint32_t *)fib->data = tv.tv_sec;
368 aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
369 aac_release_sync_fib(sc);
370 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
371 }
372
373 void
374 aac_add_event(struct aac_softc *sc, struct aac_event *event)
375 {
376
377 switch (event->ev_type & AAC_EVENT_MASK) {
378 case AAC_EVENT_CMFREE:
379 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
380 break;
381 default:
382 device_printf(sc->aac_dev, "aac_add_event: unknown event %d\n",
383 event->ev_type);
384 break;
385 }
386 }
387
388 /*
389 * Request information of container #cid
390 */
391 static struct aac_mntinforesp *
392 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
393 {
394 struct aac_mntinfo *mi;
395
396 mi = (struct aac_mntinfo *)&fib->data[0];
397 /* use 64-bit LBA if enabled */
398 mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
399 VM_NameServe64 : VM_NameServe;
400 mi->MntType = FT_FILESYS;
401 mi->MntCount = cid;
402
403 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
404 sizeof(struct aac_mntinfo))) {
405 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
406 return (NULL);
407 }
408
409 return ((struct aac_mntinforesp *)&fib->data[0]);
410 }
411
412 /*
413 * Probe for containers, create disks.
414 */
415 static void
416 aac_startup(void *arg)
417 {
418 struct aac_softc *sc;
419 struct aac_fib *fib;
420 struct aac_mntinforesp *mir;
421 int count = 0, i = 0;
422
423 sc = (struct aac_softc *)arg;
424 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
425
426 mtx_lock(&sc->aac_io_lock);
427 aac_alloc_sync_fib(sc, &fib);
428
429 /* loop over possible containers */
430 do {
431 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
432 continue;
433 if (i == 0)
434 count = mir->MntRespCount;
435 aac_add_container(sc, mir, 0);
436 i++;
437 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
438
439 aac_release_sync_fib(sc);
440 mtx_unlock(&sc->aac_io_lock);
441
442 /* mark the controller up */
443 sc->aac_state &= ~AAC_STATE_SUSPEND;
444
445 /* poke the bus to actually attach the child devices */
446 if (bus_generic_attach(sc->aac_dev))
447 device_printf(sc->aac_dev, "bus_generic_attach failed\n");
448
449 /* disconnect ourselves from the intrhook chain */
450 config_intrhook_disestablish(&sc->aac_ich);
451
452 /* enable interrupts now */
453 AAC_UNMASK_INTERRUPTS(sc);
454 }
455
456 /*
457 * Create a device to represent a new container
458 */
459 static void
460 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
461 {
462 struct aac_container *co;
463 device_t child;
464
465 /*
466 * Check container volume type for validity. Note that many of
467 * the possible types may never show up.
468 */
469 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
470 co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
471 M_NOWAIT | M_ZERO);
472 if (co == NULL)
473 panic("Out of memory?!");
474 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
475 mir->MntTable[0].ObjectId,
476 mir->MntTable[0].FileSystemName,
477 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
478
479 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
480 device_printf(sc->aac_dev, "device_add_child failed\n");
481 else
482 device_set_ivars(child, co);
483 device_set_desc(child, aac_describe_code(aac_container_types,
484 mir->MntTable[0].VolType));
485 co->co_disk = child;
486 co->co_found = f;
487 bcopy(&mir->MntTable[0], &co->co_mntobj,
488 sizeof(struct aac_mntobj));
489 mtx_lock(&sc->aac_container_lock);
490 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
491 mtx_unlock(&sc->aac_container_lock);
492 }
493 }
494
495 /*
496 * Allocate resources associated with (sc)
497 */
498 static int
499 aac_alloc(struct aac_softc *sc)
500 {
501
502 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
503
504 /*
505 * Create DMA tag for mapping buffers into controller-addressable space.
506 */
507 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
508 1, 0, /* algnmnt, boundary */
509 (sc->flags & AAC_FLAGS_SG_64BIT) ?
510 BUS_SPACE_MAXADDR :
511 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
512 BUS_SPACE_MAXADDR, /* highaddr */
513 NULL, NULL, /* filter, filterarg */
514 sc->aac_max_sectors << 9, /* maxsize */
515 sc->aac_sg_tablesize, /* nsegments */
516 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
517 BUS_DMA_ALLOCNOW, /* flags */
518 busdma_lock_mutex, /* lockfunc */
519 &sc->aac_io_lock, /* lockfuncarg */
520 &sc->aac_buffer_dmat)) {
521 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
522 return (ENOMEM);
523 }
524
525 /*
526 * Create DMA tag for mapping FIBs into controller-addressable space.
527 */
528 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
529 1, 0, /* algnmnt, boundary */
530 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
531 BUS_SPACE_MAXADDR_32BIT :
532 0x7fffffff, /* lowaddr */
533 BUS_SPACE_MAXADDR, /* highaddr */
534 NULL, NULL, /* filter, filterarg */
535 sc->aac_max_fibs_alloc *
536 sc->aac_max_fib_size, /* maxsize */
537 1, /* nsegments */
538 sc->aac_max_fibs_alloc *
539 sc->aac_max_fib_size, /* maxsegsize */
540 0, /* flags */
541 NULL, NULL, /* No locking needed */
542 &sc->aac_fib_dmat)) {
543 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
544 return (ENOMEM);
545 }
546
547 /*
548 * Create DMA tag for the common structure and allocate it.
549 */
550 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
551 1, 0, /* algnmnt, boundary */
552 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
553 BUS_SPACE_MAXADDR_32BIT :
554 0x7fffffff, /* lowaddr */
555 BUS_SPACE_MAXADDR, /* highaddr */
556 NULL, NULL, /* filter, filterarg */
557 8192 + sizeof(struct aac_common), /* maxsize */
558 1, /* nsegments */
559 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
560 0, /* flags */
561 NULL, NULL, /* No locking needed */
562 &sc->aac_common_dmat)) {
563 device_printf(sc->aac_dev,
564 "can't allocate common structure DMA tag\n");
565 return (ENOMEM);
566 }
567 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
568 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
569 device_printf(sc->aac_dev, "can't allocate common structure\n");
570 return (ENOMEM);
571 }
572
573 /*
574 * Work around a bug in the 2120 and 2200 that cannot DMA commands
575 * below address 8192 in physical memory.
576 * XXX If the padding is not needed, can it be put to use instead
577 * of ignored?
578 */
579 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
580 sc->aac_common, 8192 + sizeof(*sc->aac_common),
581 aac_common_map, sc, 0);
582
583 if (sc->aac_common_busaddr < 8192) {
584 sc->aac_common = (struct aac_common *)
585 ((uint8_t *)sc->aac_common + 8192);
586 sc->aac_common_busaddr += 8192;
587 }
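/*
 * Note on the fix-up above: if the common area happened to land below bus
 * address 8192, both the kernel pointer and the bus address are advanced by
 * 8KB; the extra 8192 bytes requested from the DMA tag provide the slack, so
 * the adapter is never handed a command region below that address.
 */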
588 bzero(sc->aac_common, sizeof(*sc->aac_common));
589
590 /* Allocate some FIBs and associated command structs */
591 TAILQ_INIT(&sc->aac_fibmap_tqh);
592 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
593 M_AACBUF, M_WAITOK|M_ZERO);
594 while (sc->total_fibs < sc->aac_max_fibs) {
595 if (aac_alloc_commands(sc) != 0)
596 break;
597 }
598 if (sc->total_fibs == 0)
599 return (ENOMEM);
600
601 return (0);
602 }
603
604 /*
605 * Free all of the resources associated with (sc)
606 *
607 * Should not be called if the controller is active.
608 */
609 void
610 aac_free(struct aac_softc *sc)
611 {
612
613 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
614
615 /* remove the control device */
616 if (sc->aac_dev_t != NULL)
617 destroy_dev(sc->aac_dev_t);
618
619 /* throw away any FIB buffers, discard the FIB DMA tag */
620 aac_free_commands(sc);
621 if (sc->aac_fib_dmat)
622 bus_dma_tag_destroy(sc->aac_fib_dmat);
623
624 free(sc->aac_commands, M_AACBUF);
625
626 /* destroy the common area */
627 if (sc->aac_common) {
628 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
629 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
630 sc->aac_common_dmamap);
631 }
632 if (sc->aac_common_dmat)
633 bus_dma_tag_destroy(sc->aac_common_dmat);
634
635 /* disconnect the interrupt handler */
636 if (sc->aac_intr)
637 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
638 if (sc->aac_irq != NULL) {
639 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
640 rman_get_rid(sc->aac_irq), sc->aac_irq);
641 pci_release_msi(sc->aac_dev);
642 }
643
644 /* destroy data-transfer DMA tag */
645 if (sc->aac_buffer_dmat)
646 bus_dma_tag_destroy(sc->aac_buffer_dmat);
647
648 /* destroy the parent DMA tag */
649 if (sc->aac_parent_dmat)
650 bus_dma_tag_destroy(sc->aac_parent_dmat);
651
652 /* release the register window mapping */
653 if (sc->aac_regs_res0 != NULL)
654 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
655 rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
656 if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
657 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
658 rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
659 }
660
661 /*
662 * Disconnect from the controller completely, in preparation for unload.
663 */
664 int
665 aac_detach(device_t dev)
666 {
667 struct aac_softc *sc;
668 struct aac_container *co;
669 struct aac_sim *sim;
670 int error;
671
672 sc = device_get_softc(dev);
673 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
674
675 callout_drain(&sc->aac_daemontime);
676
677 mtx_lock(&sc->aac_io_lock);
678 while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
679 sc->aifflags |= AAC_AIFFLAGS_EXIT;
680 wakeup(sc->aifthread);
681 msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
682 }
683 mtx_unlock(&sc->aac_io_lock);
684 KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
685 ("%s: invalid detach state", __func__));
686
687 /* Remove the child containers */
688 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
689 error = device_delete_child(dev, co->co_disk);
690 if (error)
691 return (error);
692 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
693 free(co, M_AACBUF);
694 }
695
696 /* Remove the CAM SIMs */
697 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
698 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
699 error = device_delete_child(dev, sim->sim_dev);
700 if (error)
701 return (error);
702 free(sim, M_AACBUF);
703 }
704
705 if ((error = aac_shutdown(dev)))
706 return(error);
707
708 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
709
710 aac_free(sc);
711
712 mtx_destroy(&sc->aac_aifq_lock);
713 mtx_destroy(&sc->aac_io_lock);
714 mtx_destroy(&sc->aac_container_lock);
715
716 return(0);
717 }
718
719 /*
720 * Bring the controller down to a dormant state and detach all child devices.
721 *
722 * This function is called before detach or system shutdown.
723 *
724 * Note that we can assume that the bioq on the controller is empty, as we won't
725 * allow shutdown if any device is open.
726 */
727 int
728 aac_shutdown(device_t dev)
729 {
730 struct aac_softc *sc;
731 struct aac_fib *fib;
732 struct aac_close_command *cc;
733
734 sc = device_get_softc(dev);
735 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
736
737 sc->aac_state |= AAC_STATE_SUSPEND;
738
739 /*
740 * Send a Container shutdown followed by a HostShutdown FIB to the
741 * controller to convince it that we don't want to talk to it anymore.
742 * We've been closed and all I/O completed already
743 */
744 device_printf(sc->aac_dev, "shutting down controller...");
745
746 mtx_lock(&sc->aac_io_lock);
747 aac_alloc_sync_fib(sc, &fib);
748 cc = (struct aac_close_command *)&fib->data[0];
749
750 bzero(cc, sizeof(struct aac_close_command));
751 cc->Command = VM_CloseAll;
752 cc->ContainerId = 0xffffffff;
753 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
754 sizeof(struct aac_close_command)))
755 printf("FAILED.\n");
756 else
757 printf("done\n");
758 #if 0
759 else {
760 fib->data[0] = 0;
761 /*
762 * XXX Issuing this command to the controller makes it shut down
763 * but also keeps it from coming back up without a reset of the
764 * PCI bus. This is not desirable if you are just unloading the
765 * driver module with the intent to reload it later.
766 */
767 if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
768 fib, 1)) {
769 printf("FAILED.\n");
770 } else {
771 printf("done.\n");
772 }
773 }
774 #endif
775
776 AAC_MASK_INTERRUPTS(sc);
777 aac_release_sync_fib(sc);
778 mtx_unlock(&sc->aac_io_lock);
779
780 return(0);
781 }
782
783 /*
784 * Bring the controller to a quiescent state, ready for system suspend.
785 */
786 int
787 aac_suspend(device_t dev)
788 {
789 struct aac_softc *sc;
790
791 sc = device_get_softc(dev);
792
793 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
794 sc->aac_state |= AAC_STATE_SUSPEND;
795
796 AAC_MASK_INTERRUPTS(sc);
797 return(0);
798 }
799
800 /*
801 * Bring the controller back to a state ready for operation.
802 */
803 int
804 aac_resume(device_t dev)
805 {
806 struct aac_softc *sc;
807
808 sc = device_get_softc(dev);
809
810 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
811 sc->aac_state &= ~AAC_STATE_SUSPEND;
812 AAC_UNMASK_INTERRUPTS(sc);
813 return(0);
814 }
815
816 /*
817 * Interrupt handler for NEW_COMM interface.
818 */
819 void
820 aac_new_intr(void *arg)
821 {
822 struct aac_softc *sc;
823 u_int32_t index, fast;
824 struct aac_command *cm;
825 struct aac_fib *fib;
826 int i;
827
828 sc = (struct aac_softc *)arg;
829
830 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
831 mtx_lock(&sc->aac_io_lock);
832 while (1) {
833 index = AAC_GET_OUTB_QUEUE(sc);
834 if (index == 0xffffffff)
835 index = AAC_GET_OUTB_QUEUE(sc);
836 if (index == 0xffffffff)
837 break;
838 if (index & 2) {
839 if (index == 0xfffffffe) {
840 /* XXX This means that the controller wants
841 * more work. Ignore it for now.
842 */
843 continue;
844 }
845 /* AIF */
846 fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
847 M_NOWAIT | M_ZERO);
848 if (fib == NULL) {
849 /* If we're really this short on memory,
850 * hopefully breaking out of the handler will
851 * allow something to get freed. This
852 * actually sucks a whole lot.
853 */
854 break;
855 }
856 index &= ~2;
857 for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
858 ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
859 aac_handle_aif(sc, fib);
860 free(fib, M_AACBUF);
861
862 /*
863 * AIF memory is owned by the adapter, so let it
864 * know that we are done with it.
865 */
866 AAC_SET_OUTB_QUEUE(sc, index);
867 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
868 } else {
869 fast = index & 1;
870 cm = sc->aac_commands + (index >> 2);
871 fib = cm->cm_fib;
872 if (fast) {
873 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
874 *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
875 }
876 aac_remove_busy(cm);
877 aac_unmap_command(cm);
878 cm->cm_flags |= AAC_CMD_COMPLETED;
879
880 /* is there a completion handler? */
881 if (cm->cm_complete != NULL) {
882 cm->cm_complete(cm);
883 } else {
884 /* assume that someone is sleeping on this
885 * command
886 */
887 wakeup(cm);
888 }
889 sc->flags &= ~AAC_QUEUE_FRZN;
890 }
891 }
892 /* see if we can start some more I/O */
893 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
894 aac_startio(sc);
895
896 mtx_unlock(&sc->aac_io_lock);
897 }
898
899 /*
900 * Interrupt filter for !NEW_COMM interface.
901 */
902 int
903 aac_filter(void *arg)
904 {
905 struct aac_softc *sc;
906 u_int16_t reason;
907
908 sc = (struct aac_softc *)arg;
909
910 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
911 /*
912 * Read the status register directly. This is faster than taking the
913 * driver lock and reading the queues directly. It also saves having
914 * to turn parts of the driver lock into a spin mutex, which would be
915 * ugly.
916 */
917 reason = AAC_GET_ISTATUS(sc);
918 AAC_CLEAR_ISTATUS(sc, reason);
919
920 /* handle completion processing */
921 if (reason & AAC_DB_RESPONSE_READY)
922 taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete);
923
924 /* controller wants to talk to us */
925 if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
926 /*
927 * XXX Make sure that we don't get fooled by strange messages
928 * that start with a NULL.
929 */
930 if ((reason & AAC_DB_PRINTF) &&
931 (sc->aac_common->ac_printf[0] == 0))
932 sc->aac_common->ac_printf[0] = 32;
933
934 /*
935 * This might miss doing the actual wakeup. However, the
936 * msleep that this is waking up has a timeout, so it will
937 * wake up eventually. AIFs and printfs are low enough
938 * priority that they can handle hanging out for a few seconds
939 * if needed.
940 */
941 wakeup(sc->aifthread);
942 }
943 return (FILTER_HANDLED);
944 }
945
946 /*
947 * Command Processing
948 */
949
950 /*
951 * Start as much queued I/O as possible on the controller
952 */
953 void
954 aac_startio(struct aac_softc *sc)
955 {
956 struct aac_command *cm;
957 int error;
958
959 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
960
961 for (;;) {
962 /*
963 * This flag might be set if the card is out of resources.
964 * Checking it here prevents an infinite loop of deferrals.
965 */
966 if (sc->flags & AAC_QUEUE_FRZN)
967 break;
968
969 /*
970 * Try to get a command that's been put off for lack of
971 * resources
972 */
973 cm = aac_dequeue_ready(sc);
974
975 /*
976 * Try to build a command off the bio queue (ignore error
977 * return)
978 */
979 if (cm == NULL)
980 aac_bio_command(sc, &cm);
981
982 /* nothing to do? */
983 if (cm == NULL)
984 break;
985
986 /* don't map more than once */
987 if (cm->cm_flags & AAC_CMD_MAPPED)
988 panic("aac: command %p already mapped", cm);
989
990 /*
991 * Set up the command to go to the controller. If there are no
992 * data buffers associated with the command then it can bypass
993 * busdma.
994 */
995 if (cm->cm_datalen != 0) {
996 if (cm->cm_flags & AAC_REQ_BIO)
997 error = bus_dmamap_load_bio(
998 sc->aac_buffer_dmat, cm->cm_datamap,
999 (struct bio *)cm->cm_private,
1000 aac_map_command_sg, cm, 0);
1001 else
1002 error = bus_dmamap_load(sc->aac_buffer_dmat,
1003 cm->cm_datamap, cm->cm_data,
1004 cm->cm_datalen, aac_map_command_sg, cm, 0);
1005 if (error == EINPROGRESS) {
1006 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
1007 sc->flags |= AAC_QUEUE_FRZN;
1008 } else if (error != 0)
1009 panic("aac_startio: unexpected error %d from "
1010 "busdma", error);
1011 } else
1012 aac_map_command_sg(cm, NULL, 0, 0);
1013 }
1014 }
1015
1016 /*
1017 * Handle notification of one or more FIBs coming from the controller.
1018 */
1019 static void
1020 aac_command_thread(struct aac_softc *sc)
1021 {
1022 struct aac_fib *fib;
1023 u_int32_t fib_size;
1024 int size, retval;
1025
1026 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1027
1028 mtx_lock(&sc->aac_io_lock);
1029 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1030
1031 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1032 retval = 0;
1033 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1034 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1035 "aifthd", AAC_PERIODIC_INTERVAL * hz);
1036
1037 /*
1038 * First see if any FIBs need to be allocated. This needs
1039 * to be called without the driver lock because contigmalloc
1040 * can sleep.
1041 */
1042 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1043 mtx_unlock(&sc->aac_io_lock);
1044 aac_alloc_commands(sc);
1045 mtx_lock(&sc->aac_io_lock);
1046 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1047 aac_startio(sc);
1048 }
1049
1050 /*
1051 * While we're here, check to see if any commands are stuck.
1052 * This is pretty low-priority, so it's ok if it doesn't
1053 * always fire.
1054 */
1055 if (retval == EWOULDBLOCK)
1056 aac_timeout(sc);
1057
1058 /* Check the hardware printf message buffer */
1059 if (sc->aac_common->ac_printf[0] != 0)
1060 aac_print_printf(sc);
1061
1062 /* Also check to see if the adapter has a command for us. */
1063 if (sc->flags & AAC_FLAGS_NEW_COMM)
1064 continue;
1065 for (;;) {
1066 if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
1067 &fib_size, &fib))
1068 break;
1069
1070 AAC_PRINT_FIB(sc, fib);
1071
1072 switch (fib->Header.Command) {
1073 case AifRequest:
1074 aac_handle_aif(sc, fib);
1075 break;
1076 default:
1077 device_printf(sc->aac_dev, "unknown command "
1078 "from controller\n");
1079 break;
1080 }
1081
1082 if ((fib->Header.XferState == 0) ||
1083 (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
1084 break;
1085 }
1086
1087 /* Return the AIF to the controller. */
1088 if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
1089 fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
1090 *(AAC_FSAStatus*)fib->data = ST_OK;
1091
1092 /* XXX Compute the Size field? */
1093 size = fib->Header.Size;
1094 if (size > sizeof(struct aac_fib)) {
1095 size = sizeof(struct aac_fib);
1096 fib->Header.Size = size;
1097 }
1098 /*
1099 * Since we did not generate this command, it
1100 * cannot go through the normal
1101 * enqueue->startio chain.
1102 */
1103 aac_enqueue_response(sc,
1104 AAC_ADAP_NORM_RESP_QUEUE,
1105 fib);
1106 }
1107 }
1108 }
1109 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1110 mtx_unlock(&sc->aac_io_lock);
1111 wakeup(sc->aac_dev);
1112
1113 kproc_exit(0);
1114 }
1115
1116 /*
1117 * Process completed commands.
1118 */
1119 static void
1120 aac_complete(void *context, int pending)
1121 {
1122 struct aac_softc *sc;
1123 struct aac_command *cm;
1124 struct aac_fib *fib;
1125 u_int32_t fib_size;
1126
1127 sc = (struct aac_softc *)context;
1128 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1129
1130 mtx_lock(&sc->aac_io_lock);
1131
1132 /* pull completed commands off the queue */
1133 for (;;) {
1134 /* look for completed FIBs on our queue */
1135 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
1136 &fib))
1137 break; /* nothing to do */
1138
1139 /* get the command, unmap and hand off for processing */
1140 cm = sc->aac_commands + fib->Header.SenderData;
1141 if (cm == NULL) {
1142 AAC_PRINT_FIB(sc, fib);
1143 break;
1144 }
1145 if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
1146 device_printf(sc->aac_dev,
1147 "COMMAND %p COMPLETED AFTER %d SECONDS\n",
1148 cm, (int)(time_uptime-cm->cm_timestamp));
1149
1150 aac_remove_busy(cm);
1151
1152 aac_unmap_command(cm);
1153 cm->cm_flags |= AAC_CMD_COMPLETED;
1154
1155 /* is there a completion handler? */
1156 if (cm->cm_complete != NULL) {
1157 cm->cm_complete(cm);
1158 } else {
1159 /* assume that someone is sleeping on this command */
1160 wakeup(cm);
1161 }
1162 }
1163
1164 /* see if we can start some more I/O */
1165 sc->flags &= ~AAC_QUEUE_FRZN;
1166 aac_startio(sc);
1167
1168 mtx_unlock(&sc->aac_io_lock);
1169 }
1170
1171 /*
1172 * Handle a bio submitted from a disk device.
1173 */
1174 void
1175 aac_submit_bio(struct bio *bp)
1176 {
1177 struct aac_disk *ad;
1178 struct aac_softc *sc;
1179
1180 ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1181 sc = ad->ad_controller;
1182 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1183
1184 /* queue the BIO and try to get some work done */
1185 aac_enqueue_bio(sc, bp);
1186 aac_startio(sc);
1187 }
1188
1189 /*
1190 * Get a bio and build a command to go with it.
1191 */
1192 static int
1193 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
1194 {
1195 struct aac_command *cm;
1196 struct aac_fib *fib;
1197 struct aac_disk *ad;
1198 struct bio *bp;
1199
1200 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1201
1202 /* get the resources we will need */
1203 cm = NULL;
1204 bp = NULL;
1205 if (aac_alloc_command(sc, &cm)) /* get a command */
1206 goto fail;
1207 if ((bp = aac_dequeue_bio(sc)) == NULL)
1208 goto fail;
1209
1210 /* fill out the command */
1211 cm->cm_datalen = bp->bio_bcount;
1212 cm->cm_complete = aac_bio_complete;
1213 cm->cm_flags = AAC_REQ_BIO;
1214 cm->cm_private = bp;
1215 cm->cm_timestamp = time_uptime;
1216
1217 /* build the FIB */
1218 fib = cm->cm_fib;
1219 fib->Header.Size = sizeof(struct aac_fib_header);
1220 fib->Header.XferState =
1221 AAC_FIBSTATE_HOSTOWNED |
1222 AAC_FIBSTATE_INITIALISED |
1223 AAC_FIBSTATE_EMPTY |
1224 AAC_FIBSTATE_FROMHOST |
1225 AAC_FIBSTATE_REXPECTED |
1226 AAC_FIBSTATE_NORM |
1227 AAC_FIBSTATE_ASYNC |
1228 AAC_FIBSTATE_FAST_RESPONSE;
1229
1230 /* build the read/write request */
1231 ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1232
1233 if (sc->flags & AAC_FLAGS_RAW_IO) {
1234 struct aac_raw_io *raw;
1235 raw = (struct aac_raw_io *)&fib->data[0];
1236 fib->Header.Command = RawIo;
1237 raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
1238 raw->ByteCount = bp->bio_bcount;
1239 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1240 raw->BpTotal = 0;
1241 raw->BpComplete = 0;
1242 fib->Header.Size += sizeof(struct aac_raw_io);
1243 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
1244 if (bp->bio_cmd == BIO_READ) {
1245 raw->Flags = 1;
1246 cm->cm_flags |= AAC_CMD_DATAIN;
1247 } else {
1248 raw->Flags = 0;
1249 cm->cm_flags |= AAC_CMD_DATAOUT;
1250 }
1251 } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1252 fib->Header.Command = ContainerCommand;
1253 if (bp->bio_cmd == BIO_READ) {
1254 struct aac_blockread *br;
1255 br = (struct aac_blockread *)&fib->data[0];
1256 br->Command = VM_CtBlockRead;
1257 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1258 br->BlockNumber = bp->bio_pblkno;
1259 br->ByteCount = bp->bio_bcount;
1260 fib->Header.Size += sizeof(struct aac_blockread);
1261 cm->cm_sgtable = &br->SgMap;
1262 cm->cm_flags |= AAC_CMD_DATAIN;
1263 } else {
1264 struct aac_blockwrite *bw;
1265 bw = (struct aac_blockwrite *)&fib->data[0];
1266 bw->Command = VM_CtBlockWrite;
1267 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1268 bw->BlockNumber = bp->bio_pblkno;
1269 bw->ByteCount = bp->bio_bcount;
1270 bw->Stable = CUNSTABLE;
1271 fib->Header.Size += sizeof(struct aac_blockwrite);
1272 cm->cm_flags |= AAC_CMD_DATAOUT;
1273 cm->cm_sgtable = &bw->SgMap;
1274 }
1275 } else {
1276 fib->Header.Command = ContainerCommand64;
1277 if (bp->bio_cmd == BIO_READ) {
1278 struct aac_blockread64 *br;
1279 br = (struct aac_blockread64 *)&fib->data[0];
1280 br->Command = VM_CtHostRead64;
1281 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1282 br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1283 br->BlockNumber = bp->bio_pblkno;
1284 br->Pad = 0;
1285 br->Flags = 0;
1286 fib->Header.Size += sizeof(struct aac_blockread64);
1287 cm->cm_flags |= AAC_CMD_DATAIN;
1288 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
1289 } else {
1290 struct aac_blockwrite64 *bw;
1291 bw = (struct aac_blockwrite64 *)&fib->data[0];
1292 bw->Command = VM_CtHostWrite64;
1293 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1294 bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1295 bw->BlockNumber = bp->bio_pblkno;
1296 bw->Pad = 0;
1297 bw->Flags = 0;
1298 fib->Header.Size += sizeof(struct aac_blockwrite64);
1299 cm->cm_flags |= AAC_CMD_DATAOUT;
1300 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
1301 }
1302 }
1303
1304 *cmp = cm;
1305 return(0);
1306
1307 fail:
1308 if (bp != NULL)
1309 aac_enqueue_bio(sc, bp);
1310 if (cm != NULL)
1311 aac_release_command(cm);
1312 return(ENOMEM);
1313 }
1314
1315 /*
1316 * Handle a bio-instigated command that has been completed.
1317 */
1318 static void
1319 aac_bio_complete(struct aac_command *cm)
1320 {
1321 struct aac_blockread_response *brr;
1322 struct aac_blockwrite_response *bwr;
1323 struct bio *bp;
1324 AAC_FSAStatus status;
1325
1326 /* fetch relevant status and then release the command */
1327 bp = (struct bio *)cm->cm_private;
1328 if (bp->bio_cmd == BIO_READ) {
1329 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1330 status = brr->Status;
1331 } else {
1332 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1333 status = bwr->Status;
1334 }
1335 aac_release_command(cm);
1336
1337 /* fix up the bio based on status */
1338 if (status == ST_OK) {
1339 bp->bio_resid = 0;
1340 } else {
1341 bp->bio_error = EIO;
1342 bp->bio_flags |= BIO_ERROR;
1343 }
1344 aac_biodone(bp);
1345 }
1346
1347 /*
1348 * Submit a command to the controller, return when it completes.
1349 * XXX This is very dangerous! If the card has gone out to lunch, we could
1350 * be stuck here forever. At the same time, signals are not caught
1351 * because there is a risk that a signal could wakeup the sleep before
1352 * the card has a chance to complete the command. Since there is no way
1353 * to cancel a command that is in progress, we can't protect against the
1354 * card completing a command late and spamming the command and data
1355 * memory. So, we are held hostage until the command completes.
1356 */
1357 static int
1358 aac_wait_command(struct aac_command *cm)
1359 {
1360 struct aac_softc *sc;
1361 int error;
1362
1363 sc = cm->cm_sc;
1364 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1365
1366 /* Put the command on the ready queue and get things going */
1367 aac_enqueue_ready(cm);
1368 aac_startio(sc);
1369 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
1370 return(error);
1371 }
1372
1373 /*
1374 * Command Buffer Management
1375 */
1376
1377 /*
1378 * Allocate a command.
1379 */
1380 int
1381 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1382 {
1383 struct aac_command *cm;
1384
1385 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1386
1387 if ((cm = aac_dequeue_free(sc)) == NULL) {
1388 if (sc->total_fibs < sc->aac_max_fibs) {
1389 mtx_lock(&sc->aac_io_lock);
1390 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1391 mtx_unlock(&sc->aac_io_lock);
1392 wakeup(sc->aifthread);
1393 }
1394 return (EBUSY);
1395 }
1396
1397 *cmp = cm;
1398 return(0);
1399 }
1400
1401 /*
1402 * Release a command back to the freelist.
1403 */
1404 void
1405 aac_release_command(struct aac_command *cm)
1406 {
1407 struct aac_event *event;
1408 struct aac_softc *sc;
1409
1410 sc = cm->cm_sc;
1411 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1412
1413 /* (re)initialize the command/FIB */
1414 cm->cm_datalen = 0;
1415 cm->cm_sgtable = NULL;
1416 cm->cm_flags = 0;
1417 cm->cm_complete = NULL;
1418 cm->cm_private = NULL;
1419 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1420 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1421 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1422 cm->cm_fib->Header.Flags = 0;
1423 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1424
1425 /*
1426 * These are duplicated in aac_map_command_sg to cover the case where an
1427 * intermediate stage may have destroyed them. They're left
1428 * initialized here for debugging purposes only.
1429 */
1430 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1431 cm->cm_fib->Header.SenderData = 0;
1432
1433 aac_enqueue_free(cm);
1434
1435 if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1436 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1437 event->ev_callback(sc, event, event->ev_arg);
1438 }
1439 }
1440
1441 /*
1442 * Map helper for command/FIB allocation.
1443 */
1444 static void
1445 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1446 {
1447 uint64_t *fibphys;
1448
1449 fibphys = (uint64_t *)arg;
1450
1451 *fibphys = segs[0].ds_addr;
1452 }
1453
1454 /*
1455 * Allocate and initialize commands/FIBs for this adapter.
1456 */
1457 static int
1458 aac_alloc_commands(struct aac_softc *sc)
1459 {
1460 struct aac_command *cm;
1461 struct aac_fibmap *fm;
1462 uint64_t fibphys;
1463 int i, error;
1464
1465 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1466
1467 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1468 return (ENOMEM);
1469
1470 fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
1471 if (fm == NULL)
1472 return (ENOMEM);
1473
1474 /* allocate the FIBs in DMAable memory and load them */
1475 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1476 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1477 device_printf(sc->aac_dev,
1478 "Not enough contiguous memory available.\n");
1479 free(fm, M_AACBUF);
1480 return (ENOMEM);
1481 }
1482
1483 /* Ignore errors since this doesn't bounce */
1484 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1485 sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1486 aac_map_command_helper, &fibphys, 0);
1487
1488 /* initialize constant fields in the command structure */
1489 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1490 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1491 cm = sc->aac_commands + sc->total_fibs;
1492 fm->aac_commands = cm;
1493 cm->cm_sc = sc;
1494 cm->cm_fib = (struct aac_fib *)
1495 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1496 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1497 cm->cm_index = sc->total_fibs;
1498
1499 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1500 &cm->cm_datamap)) != 0)
1501 break;
1502 mtx_lock(&sc->aac_io_lock);
1503 aac_release_command(cm);
1504 sc->total_fibs++;
1505 mtx_unlock(&sc->aac_io_lock);
1506 }
1507
1508 if (i > 0) {
1509 mtx_lock(&sc->aac_io_lock);
1510 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1511 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1512 mtx_unlock(&sc->aac_io_lock);
1513 return (0);
1514 }
1515
1516 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1517 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1518 free(fm, M_AACBUF);
1519 return (ENOMEM);
1520 }
1521
1522 /*
1523 * Free FIBs owned by this adapter.
1524 */
1525 static void
1526 aac_free_commands(struct aac_softc *sc)
1527 {
1528 struct aac_fibmap *fm;
1529 struct aac_command *cm;
1530 int i;
1531
1532 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1533
1534 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1535 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1536 /*
1537 * We check against total_fibs to handle partially
1538 * allocated blocks.
1539 */
1540 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1541 cm = fm->aac_commands + i;
1542 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1543 }
1544 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1545 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1546 free(fm, M_AACBUF);
1547 }
1548 }
1549
1550 /*
1551 * Command-mapping helper function - populate this command's s/g table.
1552 */
1553 static void
1554 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1555 {
1556 struct aac_softc *sc;
1557 struct aac_command *cm;
1558 struct aac_fib *fib;
1559 int i;
1560
1561 cm = (struct aac_command *)arg;
1562 sc = cm->cm_sc;
1563 fib = cm->cm_fib;
1564 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1565
1566 /* copy into the FIB */
1567 if (cm->cm_sgtable != NULL) {
1568 if (fib->Header.Command == RawIo) {
1569 struct aac_sg_tableraw *sg;
1570 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1571 sg->SgCount = nseg;
1572 for (i = 0; i < nseg; i++) {
1573 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1574 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1575 sg->SgEntryRaw[i].Next = 0;
1576 sg->SgEntryRaw[i].Prev = 0;
1577 sg->SgEntryRaw[i].Flags = 0;
1578 }
1579 /* update the FIB size for the s/g count */
1580 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1581 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1582 struct aac_sg_table *sg;
1583 sg = cm->cm_sgtable;
1584 sg->SgCount = nseg;
1585 for (i = 0; i < nseg; i++) {
1586 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1587 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1588 }
1589 /* update the FIB size for the s/g count */
1590 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1591 } else {
1592 struct aac_sg_table64 *sg;
1593 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1594 sg->SgCount = nseg;
1595 for (i = 0; i < nseg; i++) {
1596 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1597 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1598 }
1599 /* update the FIB size for the s/g count */
1600 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1601 }
1602 }
1603
1604 /* Fix up the address values in the FIB. Use the command array index
1605 * instead of a pointer since these fields are only 32 bits. Shift
1606 * the SenderFibAddress over to make room for the fast response bit
1607 * and for the AIF bit
1608 */
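/*
 * Sketch of the resulting encoding, matching how aac_new_intr decodes the
 * value it reads back from the outbound queue: bit 0 carries the
 * fast-response flag, bit 1 marks an AIF, and the command index is
 * recovered with (value >> 2).
 */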
1609 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1610 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1611
1612 /* save a pointer to the command for speedy reverse-lookup */
1613 cm->cm_fib->Header.SenderData = cm->cm_index;
1614
1615 if (cm->cm_flags & AAC_CMD_DATAIN)
1616 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1617 BUS_DMASYNC_PREREAD);
1618 if (cm->cm_flags & AAC_CMD_DATAOUT)
1619 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1620 BUS_DMASYNC_PREWRITE);
1621 cm->cm_flags |= AAC_CMD_MAPPED;
1622
1623 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1624 int count = 10000000L;
1625 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1626 if (--count == 0) {
1627 aac_unmap_command(cm);
1628 sc->flags |= AAC_QUEUE_FRZN;
1629 aac_requeue_ready(cm);
1630 }
1631 DELAY(5); /* wait 5 usec. */
1632 }
1633 } else {
1634 /* Put the FIB on the outbound queue */
1635 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
1636 aac_unmap_command(cm);
1637 sc->flags |= AAC_QUEUE_FRZN;
1638 aac_requeue_ready(cm);
1639 }
1640 }
1641 }
1642
1643 /*
1644 * Unmap a command from controller-visible space.
1645 */
1646 static void
1647 aac_unmap_command(struct aac_command *cm)
1648 {
1649 struct aac_softc *sc;
1650
1651 sc = cm->cm_sc;
1652 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1653
1654 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1655 return;
1656
1657 if (cm->cm_datalen != 0) {
1658 if (cm->cm_flags & AAC_CMD_DATAIN)
1659 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1660 BUS_DMASYNC_POSTREAD);
1661 if (cm->cm_flags & AAC_CMD_DATAOUT)
1662 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1663 BUS_DMASYNC_POSTWRITE);
1664
1665 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1666 }
1667 cm->cm_flags &= ~AAC_CMD_MAPPED;
1668 }
1669
1670 /*
1671 * Hardware Interface
1672 */
1673
1674 /*
1675 * bus_dmamap_load callback: record the bus address of the common area.
1676 */
1677 static void
1678 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1679 {
1680 struct aac_softc *sc;
1681
1682 sc = (struct aac_softc *)arg;
1683 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1684
1685 sc->aac_common_busaddr = segs[0].ds_addr;
1686 }
1687
1688 static int
1689 aac_check_firmware(struct aac_softc *sc)
1690 {
1691 u_int32_t code, major, minor, options = 0, atu_size = 0;
1692 int rid, status;
1693 time_t then;
1694
1695 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1696 /*
1697 * Wait for the adapter to come ready.
1698 */
1699 then = time_uptime;
1700 do {
1701 code = AAC_GET_FWSTATUS(sc);
1702 if (code & AAC_SELF_TEST_FAILED) {
1703 device_printf(sc->aac_dev, "FATAL: selftest failed\n");
1704 return(ENXIO);
1705 }
1706 if (code & AAC_KERNEL_PANIC) {
1707 device_printf(sc->aac_dev,
1708 "FATAL: controller kernel panic\n");
1709 return(ENXIO);
1710 }
1711 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1712 device_printf(sc->aac_dev,
1713 "FATAL: controller not coming ready, "
1714 "status %x\n", code);
1715 return(ENXIO);
1716 }
1717 } while (!(code & AAC_UP_AND_RUNNING));
1718
1719 /*
1720 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1721 * firmware version 1.x are not compatible with this driver.
1722 */
1723 if (sc->flags & AAC_FLAGS_PERC2QC) {
1724 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1725 NULL)) {
1726 device_printf(sc->aac_dev,
1727 "Error reading firmware version\n");
1728 return (EIO);
1729 }
1730
1731 /* These numbers are stored as ASCII! */
1732 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1733 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1734 if (major == 1) {
1735 device_printf(sc->aac_dev,
1736 "Firmware version %d.%d is not supported.\n",
1737 major, minor);
1738 return (EINVAL);
1739 }
1740 }
1741
1742 /*
1743 * Retrieve the capabilities/supported options word so we know what
1744 * work-arounds to enable. Some firmware revs don't support this
1745 * command.
1746 */
1747 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
1748 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1749 device_printf(sc->aac_dev,
1750 "RequestAdapterInfo failed\n");
1751 return (EIO);
1752 }
1753 } else {
1754 options = AAC_GET_MAILBOX(sc, 1);
1755 atu_size = AAC_GET_MAILBOX(sc, 2);
1756 sc->supported_options = options;
1757
1758 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1759 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1760 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1761 if (options & AAC_SUPPORTED_NONDASD)
1762 sc->flags |= AAC_FLAGS_ENABLE_CAM;
1763 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1764 && (sizeof(bus_addr_t) > 4)) {
1765 device_printf(sc->aac_dev,
1766 "Enabling 64-bit address support\n");
1767 sc->flags |= AAC_FLAGS_SG_64BIT;
1768 }
1769 if ((options & AAC_SUPPORTED_NEW_COMM)
1770 && sc->aac_if->aif_send_command)
1771 sc->flags |= AAC_FLAGS_NEW_COMM;
1772 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1773 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1774 }
1775
1776 /* Check for broken hardware that supports a lower number of commands */
1777 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1778
1779 /* Remap mem. resource, if required */
1780 if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1781 atu_size > rman_get_size(sc->aac_regs_res1)) {
1782 rid = rman_get_rid(sc->aac_regs_res1);
1783 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
1784 sc->aac_regs_res1);
1785 sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev,
1786 SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE);
1787 if (sc->aac_regs_res1 == NULL) {
1788 sc->aac_regs_res1 = bus_alloc_resource_any(
1789 sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1790 if (sc->aac_regs_res1 == NULL) {
1791 device_printf(sc->aac_dev,
1792 "couldn't allocate register window\n");
1793 return (ENXIO);
1794 }
1795 sc->flags &= ~AAC_FLAGS_NEW_COMM;
1796 }
1797 sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
1798 sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);
1799
1800 if (sc->aac_hwif == AAC_HWIF_NARK) {
1801 sc->aac_regs_res0 = sc->aac_regs_res1;
1802 sc->aac_btag0 = sc->aac_btag1;
1803 sc->aac_bhandle0 = sc->aac_bhandle1;
1804 }
1805 }
1806
1807 /* Read preferred settings */
1808 sc->aac_max_fib_size = sizeof(struct aac_fib);
1809 sc->aac_max_sectors = 128; /* 64KB */
1810 if (sc->flags & AAC_FLAGS_SG_64BIT)
1811 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1812 - sizeof(struct aac_blockwrite64))
1813 / sizeof(struct aac_sg_entry64);
1814 else
1815 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1816 - sizeof(struct aac_blockwrite))
1817 / sizeof(struct aac_sg_entry);
1818
1819 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1820 options = AAC_GET_MAILBOX(sc, 1);
1821 sc->aac_max_fib_size = (options & 0xFFFF);
1822 sc->aac_max_sectors = (options >> 16) << 1;
1823 options = AAC_GET_MAILBOX(sc, 2);
1824 sc->aac_sg_tablesize = (options >> 16);
1825 options = AAC_GET_MAILBOX(sc, 3);
1826 sc->aac_max_fibs = (options & 0xFFFF);
1827 }
1828 if (sc->aac_max_fib_size > PAGE_SIZE)
1829 sc->aac_max_fib_size = PAGE_SIZE;
1830 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
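/*
 * Worked example (assuming 4KB pages and the default 512-byte FIB): the
 * line above yields aac_max_fibs_alloc = 4096 / 512 = 8 FIBs per
 * contiguous allocation.
 */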
1831
1832 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1833 sc->flags |= AAC_FLAGS_RAW_IO;
1834 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1835 }
1836 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1837 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1838 sc->flags |= AAC_FLAGS_LBA_64BIT;
1839 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1840 }
1841
1842 return (0);
1843 }
1844
1845 static int
1846 aac_init(struct aac_softc *sc)
1847 {
1848 struct aac_adapter_init *ip;
1849 u_int32_t qoffset;
1850 int error;
1851
1852 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1853
1854 /*
1855 * Fill in the init structure. This tells the adapter about the
1856 * physical location of various important shared data structures.
1857 */
1858 ip = &sc->aac_common->ac_init;
1859 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1860 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1861 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1862 sc->flags |= AAC_FLAGS_RAW_IO;
1863 }
1864 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;
1865
1866 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1867 offsetof(struct aac_common, ac_fibs);
1868 ip->AdapterFibsVirtualAddress = 0;
1869 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1870 ip->AdapterFibAlign = sizeof(struct aac_fib);
1871
1872 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1873 offsetof(struct aac_common, ac_printf);
1874 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1875
1876 /*
1877 * The adapter assumes that pages are 4K in size, except on some
1878 * broken firmware versions that do the page->byte conversion twice,
1879 * therefore 'assuming' that this value is in 16MB units (2^24).
1880 * Round up since the granularity is so high.
1881 */
1882 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1883 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1884 ip->HostPhysMemPages =
1885 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1886 }
1887 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1888
1889 ip->InitFlags = 0;
1890 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1891 ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1892 device_printf(sc->aac_dev, "New comm. interface enabled\n");
1893 }
1894
1895 ip->MaxIoCommands = sc->aac_max_fibs;
1896 ip->MaxIoSize = sc->aac_max_sectors << 9;
1897 ip->MaxFibSize = sc->aac_max_fib_size;
1898
1899 /*
1900 * Initialize FIB queues. Note that it appears that the layout of the
1901 * indexes and the segmentation of the entries may be mandated by the
1902 * adapter, which is only told about the base of the queue index fields.
1903 *
1904 * The initial values of the indices are assumed to inform the adapter
1905 * of the sizes of the respective queues, and theoretically it could
1906 * work out the entire layout of the queue structures from this. We
1907 * take the easy route and just lay this area out like everyone else
1908 * does.
1909 *
1910 * The Linux driver uses a much more complex scheme whereby several
1911 * header records are kept for each queue. We use a couple of generic
1912 * list manipulation functions which 'know' the size of each list by
1913 * virtue of a table.
1914 */
1915 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
1916 qoffset &= ~(AAC_QUEUE_ALIGN - 1);
1917 sc->aac_queues =
1918 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
1919 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;
1920
1921 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1922 AAC_HOST_NORM_CMD_ENTRIES;
1923 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1924 AAC_HOST_NORM_CMD_ENTRIES;
1925 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1926 AAC_HOST_HIGH_CMD_ENTRIES;
1927 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1928 AAC_HOST_HIGH_CMD_ENTRIES;
1929 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1930 AAC_ADAP_NORM_CMD_ENTRIES;
1931 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1932 AAC_ADAP_NORM_CMD_ENTRIES;
1933 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1934 AAC_ADAP_HIGH_CMD_ENTRIES;
1935 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1936 AAC_ADAP_HIGH_CMD_ENTRIES;
1937 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1938 AAC_HOST_NORM_RESP_ENTRIES;
1939 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1940 AAC_HOST_NORM_RESP_ENTRIES;
1941 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1942 AAC_HOST_HIGH_RESP_ENTRIES;
1943 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1944 AAC_HOST_HIGH_RESP_ENTRIES;
1945 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1946 AAC_ADAP_NORM_RESP_ENTRIES;
1947 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1948 AAC_ADAP_NORM_RESP_ENTRIES;
1949 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1950 AAC_ADAP_HIGH_RESP_ENTRIES;
1951 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1952 AAC_ADAP_HIGH_RESP_ENTRIES;
1953 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
1954 &sc->aac_queues->qt_HostNormCmdQueue[0];
1955 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
1956 &sc->aac_queues->qt_HostHighCmdQueue[0];
1957 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
1958 &sc->aac_queues->qt_AdapNormCmdQueue[0];
1959 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
1960 &sc->aac_queues->qt_AdapHighCmdQueue[0];
1961 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
1962 &sc->aac_queues->qt_HostNormRespQueue[0];
1963 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
1964 &sc->aac_queues->qt_HostHighRespQueue[0];
1965 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
1966 &sc->aac_queues->qt_AdapNormRespQueue[0];
1967 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
1968 &sc->aac_queues->qt_AdapHighRespQueue[0];
1969
1970 /*
1971 * Do controller-type-specific initialisation
1972 */
1973 switch (sc->aac_hwif) {
1974 case AAC_HWIF_I960RX:
1975 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
1976 break;
1977 case AAC_HWIF_RKT:
1978 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
1979 break;
1980 default:
1981 break;
1982 }
1983
1984 /*
1985 * Give the init structure to the controller.
1986 */
1987 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
1988 sc->aac_common_busaddr +
1989 offsetof(struct aac_common, ac_init), 0, 0, 0,
1990 NULL)) {
1991 device_printf(sc->aac_dev,
1992 "error establishing init structure\n");
1993 error = EIO;
1994 goto out;
1995 }
1996
1997 error = 0;
1998 out:
1999 return(error);
2000 }
2001
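/*
* Set up the interrupt handler.  Adapters using the new comm. interface get
* a regular ithread handler (aac_new_intr); older interfaces register a
* filter routine (aac_filter) instead.
*/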
2002 static int
2003 aac_setup_intr(struct aac_softc *sc)
2004 {
2005
2006 if (sc->flags & AAC_FLAGS_NEW_COMM) {
2007 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2008 INTR_MPSAFE|INTR_TYPE_BIO, NULL,
2009 aac_new_intr, sc, &sc->aac_intr)) {
2010 device_printf(sc->aac_dev, "can't set up interrupt\n");
2011 return (EINVAL);
2012 }
2013 } else {
2014 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2015 INTR_TYPE_BIO, aac_filter, NULL,
2016 sc, &sc->aac_intr)) {
2017 device_printf(sc->aac_dev,
2018 "can't set up interrupt filter\n");
2019 return (EINVAL);
2020 }
2021 }
2022 return (0);
2023 }
2024
2025 /*
2026 * Send a synchronous command to the controller and wait for a result.
2027 * Indicate if the controller completed the command with an error status.
2028 */
2029 static int
2030 aac_sync_command(struct aac_softc *sc, u_int32_t command,
2031 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2032 u_int32_t *sp)
2033 {
2034 time_t then;
2035 u_int32_t status;
2036
2037 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2038
2039 /* populate the mailbox */
2040 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2041
2042 /* ensure the sync command doorbell flag is cleared */
2043 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2044
2045 /* then set it to signal the adapter */
2046 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2047
2048 /* spin waiting for the command to complete */
2049 then = time_uptime;
2050 do {
2051 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
2052 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2053 return(EIO);
2054 }
2055 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2056
2057 /* clear the completion flag */
2058 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2059
2060 /* get the command status */
2061 status = AAC_GET_MAILBOX(sc, 0);
2062 if (sp != NULL)
2063 *sp = status;
2064
2065 if (status != AAC_SRB_STS_SUCCESS)
2066 return (-1);
2067 return(0);
2068 }
2069
2070 int
2071 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2072 struct aac_fib *fib, u_int16_t datasize)
2073 {
2074 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2075 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2076
2077 if (datasize > AAC_FIB_DATASIZE)
2078 return(EINVAL);
2079
2080 /*
2081 * Set up the sync FIB
2082 */
2083 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2084 AAC_FIBSTATE_INITIALISED |
2085 AAC_FIBSTATE_EMPTY;
2086 fib->Header.XferState |= xferstate;
2087 fib->Header.Command = command;
2088 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2089 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2090 fib->Header.SenderSize = sizeof(struct aac_fib);
2091 fib->Header.SenderFibAddress = 0; /* Not needed */
2092 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2093 offsetof(struct aac_common,
2094 ac_sync_fib);
2095
2096 /*
2097 * Give the FIB to the controller, wait for a response.
2098 */
2099 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2100 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2101 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2102 return(EIO);
2103 }
2104
2105 return (0);
2106 }
2107
2108 /*
2109 * Adapter-space FIB queue manipulation
2110 *
2111 * Note that the queue implementation here is a little funky; neither the PI nor
2112 * CI will ever be zero. This behaviour is a controller feature.
2113 */
2114 static const struct {
2115 int size;
2116 int notify;
2117 } aac_qinfo[] = {
2118 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
2119 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
2120 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
2121 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
2122 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
2123 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
2124 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
2125 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
2126 };
2127
2128 /*
2129 * Atomically insert an entry into the nominated queue, returning 0 on success
2130 * or EBUSY if the queue is full.
2131 *
2132 * Note: it would be more efficient to defer notifying the controller in
2133 * the case where we may be inserting several entries in rapid succession,
2134 * but implementing this usefully may be difficult (it would involve a
2135 * separate queue/notify interface).
2136 */
2137 static int
2138 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
2139 {
2140 u_int32_t pi, ci;
2141 int error;
2142 u_int32_t fib_size;
2143 u_int32_t fib_addr;
2144
2145 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2146
2147 fib_size = cm->cm_fib->Header.Size;
2148 fib_addr = cm->cm_fib->Header.ReceiverFibAddress;
2149
2150 /* get the producer/consumer indices */
2151 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2152 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2153
2154 /* wrap the queue? */
2155 if (pi >= aac_qinfo[queue].size)
2156 pi = 0;
2157
2158 /* check for queue full */
2159 if ((pi + 1) == ci) {
2160 error = EBUSY;
2161 goto out;
2162 }
2163
2164 /*
2165 * To avoid a race with its completion interrupt, place this command on
2166 * the busy queue prior to advertising it to the controller.
2167 */
2168 aac_enqueue_busy(cm);
2169
2170 /* populate queue entry */
2171 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2172 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2173
2174 /* update producer index */
2175 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2176
2177 /* notify the adapter if we know how */
2178 if (aac_qinfo[queue].notify != 0)
2179 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2180
2181 error = 0;
2182
2183 out:
2184 return(error);
2185 }
2186
2187 /*
2188 * Atomically remove one entry from the nominated queue, returning 0 on
2189 * success or ENOENT if the queue is empty.
2190 */
2191 static int
2192 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
2193 struct aac_fib **fib_addr)
2194 {
2195 u_int32_t pi, ci;
2196 u_int32_t fib_index;
2197 int error;
2198 int notify;
2199
2200 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2201
2202 /* get the producer/consumer indices */
2203 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2204 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2205
2206 /* check for queue empty */
2207 if (ci == pi) {
2208 error = ENOENT;
2209 goto out;
2210 }
2211
2212 /* wrap the pi so the following test works */
2213 if (pi >= aac_qinfo[queue].size)
2214 pi = 0;
2215
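/* note if the queue was full so we can tell the adapter once we free a slot */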
2216 notify = 0;
2217 if (ci == pi + 1)
2218 notify++;
2219
2220 /* wrap the queue? */
2221 if (ci >= aac_qinfo[queue].size)
2222 ci = 0;
2223
2224 /* fetch the entry */
2225 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
2226
2227 switch (queue) {
2228 case AAC_HOST_NORM_CMD_QUEUE:
2229 case AAC_HOST_HIGH_CMD_QUEUE:
2230 /*
2231 * The aq_fib_addr is only 32 bits wide so it can't be counted
2232 * on to hold an address. For AIFs, the adapter assumes
2233 * that it's giving us an address into the array of AIF fibs.
2234 * Therefore, we have to convert it to an index.
2235 */
2236 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
2237 sizeof(struct aac_fib);
2238 *fib_addr = &sc->aac_common->ac_fibs[fib_index];
2239 break;
2240
2241 case AAC_HOST_NORM_RESP_QUEUE:
2242 case AAC_HOST_HIGH_RESP_QUEUE:
2243 {
2244 struct aac_command *cm;
2245
2246 /*
2247 * As above, an index is used instead of an actual address.
2248 * We have to shift the index to account for the fast response
2249 * bit. No other correction is needed since this value was
2250 * originally provided by the driver via the SenderFibAddress
2251 * field.
2252 */
2253 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
2254 cm = sc->aac_commands + (fib_index >> 2);
2255 *fib_addr = cm->cm_fib;
2256
2257 /*
2258 * Is this a fast response? If it is, update the fib fields in
2259 * local memory since the whole fib isn't DMA'd back up.
2260 */
2261 if (fib_index & 0x01) {
2262 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
2263 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
2264 }
2265 break;
2266 }
2267 default:
2268 panic("Invalid queue in aac_dequeue_fib()");
2269 break;
2270 }
2271
2272 /* update consumer index */
2273 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
2274
2275 /* if we have made the queue un-full, notify the adapter */
2276 if (notify && (aac_qinfo[queue].notify != 0))
2277 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2278 error = 0;
2279
2280 out:
2281 return(error);
2282 }
2283
2284 /*
2285 * Put our response to an Adapter Initiated FIB on the response queue
2286 */
2287 static int
2288 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
2289 {
2290 u_int32_t pi, ci;
2291 int error;
2292 u_int32_t fib_size;
2293 u_int32_t fib_addr;
2294
2295 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2296
2297 /* Tell the adapter where the FIB is */
2298 fib_size = fib->Header.Size;
2299 fib_addr = fib->Header.SenderFibAddress;
2300 fib->Header.ReceiverFibAddress = fib_addr;
2301
2302 /* get the producer/consumer indices */
2303 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2304 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2305
2306 /* wrap the queue? */
2307 if (pi >= aac_qinfo[queue].size)
2308 pi = 0;
2309
2310 /* check for queue full */
2311 if ((pi + 1) == ci) {
2312 error = EBUSY;
2313 goto out;
2314 }
2315
2316 /* populate queue entry */
2317 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2318 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2319
2320 /* update producer index */
2321 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2322
2323 /* notify the adapter if we know how */
2324 if (aac_qinfo[queue].notify != 0)
2325 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2326
2327 error = 0;
2328
2329 out:
2330 return(error);
2331 }
2332
2333 /*
2334 * Check for commands that have been outstanding for a suspiciously long time,
2335 * and complain about them.
2336 */
2337 static void
2338 aac_timeout(struct aac_softc *sc)
2339 {
2340 struct aac_command *cm;
2341 time_t deadline;
2342 int timedout, code;
2343
2344 /*
2345 * Traverse the busy command list, bitch about late commands once
2346 * only.
2347 */
2348 timedout = 0;
2349 deadline = time_uptime - AAC_CMD_TIMEOUT;
2350 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2351 if ((cm->cm_timestamp < deadline)
2352 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
2353 cm->cm_flags |= AAC_CMD_TIMEDOUT;
2354 device_printf(sc->aac_dev,
2355 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
2356 cm, cm->cm_fib->Header.Command,
2357 (int)(time_uptime-cm->cm_timestamp));
2358 AAC_PRINT_FIB(sc, cm->cm_fib);
2359 timedout++;
2360 }
2361 }
2362
2363 if (timedout) {
2364 code = AAC_GET_FWSTATUS(sc);
2365 if (code != AAC_UP_AND_RUNNING) {
2366 device_printf(sc->aac_dev, "WARNING! Controller is no "
2367 "longer running! code= 0x%x\n", code);
2368 }
2369 }
2370 }
2371
2372 /*
2373 * Interface Function Vectors
2374 */
2375
2376 /*
2377 * Read the current firmware status word.
2378 */
2379 static int
2380 aac_sa_get_fwstatus(struct aac_softc *sc)
2381 {
2382 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2383
2384 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
2385 }
2386
2387 static int
2388 aac_rx_get_fwstatus(struct aac_softc *sc)
2389 {
2390 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2391
2392 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2393 AAC_RX_OMR0 : AAC_RX_FWSTATUS));
2394 }
2395
2396 static int
2397 aac_rkt_get_fwstatus(struct aac_softc *sc)
2398 {
2399 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2400
2401 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2402 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
2403 }
2404
2405 /*
2406 * Notify the controller of a change in a given queue
2407 */
2408
2409 static void
2410 aac_sa_qnotify(struct aac_softc *sc, int qbit)
2411 {
2412 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2413
2414 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
2415 }
2416
2417 static void
2418 aac_rx_qnotify(struct aac_softc *sc, int qbit)
2419 {
2420 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2421
2422 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
2423 }
2424
2425 static void
2426 aac_rkt_qnotify(struct aac_softc *sc, int qbit)
2427 {
2428 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2429
2430 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
2431 }
2432
2433 /*
2434 * Get the interrupt reason bits
2435 */
2436 static int
2437 aac_sa_get_istatus(struct aac_softc *sc)
2438 {
2439 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2440
2441 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
2442 }
2443
2444 static int
2445 aac_rx_get_istatus(struct aac_softc *sc)
2446 {
2447 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2448
2449 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
2450 }
2451
2452 static int
2453 aac_rkt_get_istatus(struct aac_softc *sc)
2454 {
2455 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2456
2457 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
2458 }
2459
2460 /*
2461 * Clear some interrupt reason bits
2462 */
2463 static void
2464 aac_sa_clear_istatus(struct aac_softc *sc, int mask)
2465 {
2466 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2467
2468 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
2469 }
2470
2471 static void
2472 aac_rx_clear_istatus(struct aac_softc *sc, int mask)
2473 {
2474 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2475
2476 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
2477 }
2478
2479 static void
2480 aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
2481 {
2482 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2483
2484 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
2485 }
2486
2487 /*
2488 * Populate the mailbox and set the command word
2489 */
2490 static void
2491 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2492 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2493 {
2494 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2495
2496 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2499 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2500 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2501 }
2502
2503 static void
2504 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2505 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2506 {
2507 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2508
2509 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2512 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2513 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2514 }
2515
2516 static void
2517 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2518 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2519 {
2520 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2521
2522 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2524 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2525 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2526 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
2527 }
2528
2529 /*
2530 * Fetch the immediate command status word
2531 */
2532 static int
2533 aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2534 {
2535 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2536
2537 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2538 }
2539
2540 static int
2541 aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2542 {
2543 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2544
2545 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
2546 }
2547
2548 static int
2549 aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2550 {
2551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2552
2553 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
2554 }
2555
2556 /*
2557 * Set/clear interrupt masks
2558 */
2559 static void
2560 aac_sa_set_interrupts(struct aac_softc *sc, int enable)
2561 {
2562 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2563
2564 if (enable) {
2565 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2566 } else {
2567 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
2568 }
2569 }
2570
2571 static void
2572 aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2573 {
2574 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2575
2576 if (enable) {
2577 if (sc->flags & AAC_FLAGS_NEW_COMM)
2578 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2579 else
2580 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2581 } else {
2582 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
2583 }
2584 }
2585
2586 static void
2587 aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2588 {
2589 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2590
2591 if (enable) {
2592 if (sc->flags & AAC_FLAGS_NEW_COMM)
2593 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2594 else
2595 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2596 } else {
2597 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
2598 }
2599 }
2600
2601 /*
2602 * New comm. interface: Send command functions
2603 */
2604 static int
2605 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
2606 {
2607 u_int32_t index, device;
2608
2609 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2610
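/* read an inbound queue slot; 0xffffffff means none is free, so retry once before giving up */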
2611 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2612 if (index == 0xffffffffL)
2613 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2614 if (index == 0xffffffffL)
2615 return index;
2616 aac_enqueue_busy(cm);
2617 device = index;
2618 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2619 device += 4;
2620 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2621 device += 4;
2622 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2623 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
2624 return 0;
2625 }
2626
2627 static int
2628 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
2629 {
2630 u_int32_t index, device;
2631
2632 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2633
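/* same procedure as aac_rx_send_command, using the Rocket register set */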
2634 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2635 if (index == 0xffffffffL)
2636 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2637 if (index == 0xffffffffL)
2638 return index;
2639 aac_enqueue_busy(cm);
2640 device = index;
2641 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2642 device += 4;
2643 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2644 device += 4;
2645 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2646 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
2647 return 0;
2648 }
2649
2650 /*
2651 * New comm. interface: get, set outbound queue index
2652 */
2653 static int
2654 aac_rx_get_outb_queue(struct aac_softc *sc)
2655 {
2656 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2657
2658 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
2659 }
2660
2661 static int
2662 aac_rkt_get_outb_queue(struct aac_softc *sc)
2663 {
2664 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2665
2666 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
2667 }
2668
2669 static void
2670 aac_rx_set_outb_queue(struct aac_softc *sc, int index)
2671 {
2672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2673
2674 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
2675 }
2676
2677 static void
2678 aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
2679 {
2680 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2681
2682 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
2683 }
2684
2685 /*
2686 * Debugging and Diagnostics
2687 */
2688
2689 /*
2690 * Print some information about the controller.
2691 */
2692 static void
2693 aac_describe_controller(struct aac_softc *sc)
2694 {
2695 struct aac_fib *fib;
2696 struct aac_adapter_info *info;
2697 char *adapter_type = "Adaptec RAID controller";
2698
2699 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2700
2701 mtx_lock(&sc->aac_io_lock);
2702 aac_alloc_sync_fib(sc, &fib);
2703
2704 fib->data[0] = 0;
2705 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2706 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2707 aac_release_sync_fib(sc);
2708 mtx_unlock(&sc->aac_io_lock);
2709 return;
2710 }
2711
2712 /* save the kernel revision structure for later use */
2713 info = (struct aac_adapter_info *)&fib->data[0];
2714 sc->aac_revision = info->KernelRevision;
2715
2716 if (bootverbose) {
2717 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2718 "(%dMB cache, %dMB execution), %s\n",
2719 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2720 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2721 info->BufferMem / (1024 * 1024),
2722 info->ExecutionMem / (1024 * 1024),
2723 aac_describe_code(aac_battery_platform,
2724 info->batteryPlatform));
2725
2726 device_printf(sc->aac_dev,
2727 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2728 info->KernelRevision.external.comp.major,
2729 info->KernelRevision.external.comp.minor,
2730 info->KernelRevision.external.comp.dash,
2731 info->KernelRevision.buildNumber,
2732 (u_int32_t)(info->SerialNumber & 0xffffff));
2733
2734 device_printf(sc->aac_dev, "Supported Options=%b\n",
2735 sc->supported_options,
2736 "\2"
2737 "\1SNAPSHOT"
2738 "\2CLUSTERS"
2739 "\3WCACHE"
2740 "\4DATA64"
2741 "\5HOSTTIME"
2742 "\6RAID50"
2743 "\7WINDOW4GB"
2744 "\10SCSIUPGD"
2745 "\11SOFTERR"
2746 "\12NORECOND"
2747 "\13SGMAP64"
2748 "\14ALARM"
2749 "\15NONDASD"
2750 "\16SCSIMGT"
2751 "\17RAIDSCSI"
2752 "\21ADPTINFO"
2753 "\22NEWCOMM"
2754 "\23ARRAY64BIT"
2755 "\24HEATSENSOR");
2756 }
2757
2758 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2759 fib->data[0] = 0;
2760 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2761 device_printf(sc->aac_dev,
2762 "RequestSupplementAdapterInfo failed\n");
2763 else
2764 adapter_type = ((struct aac_supplement_adapter_info *)
2765 &fib->data[0])->AdapterTypeText;
2766 }
2767 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
2768 adapter_type,
2769 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2770 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2771
2772 aac_release_sync_fib(sc);
2773 mtx_unlock(&sc->aac_io_lock);
2774 }
2775
2776 /*
2777 * Look up a text description of a numeric error code and return a pointer to
2778 * same.
2779 */
2780 static const char *
2781 aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
2782 {
2783 int i;
2784
2785 for (i = 0; table[i].string != NULL; i++)
2786 if (table[i].code == code)
2787 return(table[i].string);
2788 return(table[i + 1].string);
2789 }
2790
2791 /*
2792 * Management Interface
2793 */
2794
2795 static int
2796 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2797 {
2798 struct aac_softc *sc;
2799
2800 sc = dev->si_drv1;
2801 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2802 device_busy(sc->aac_dev);
2803 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2804
2805 return 0;
2806 }
2807
2808 static int
2809 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2810 {
2811 union aac_statrequest *as;
2812 struct aac_softc *sc;
2813 int error = 0;
2814
2815 as = (union aac_statrequest *)arg;
2816 sc = dev->si_drv1;
2817 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2818
2819 switch (cmd) {
2820 case AACIO_STATS:
2821 switch (as->as_item) {
2822 case AACQ_FREE:
2823 case AACQ_BIO:
2824 case AACQ_READY:
2825 case AACQ_BUSY:
2826 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2827 sizeof(struct aac_qstat));
2828 break;
2829 default:
2830 error = ENOENT;
2831 break;
2832 }
2833 break;
2834
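/*
* The native FSACTL ioctls pass a pointer to their argument; dereference
* it here and fall through to share the handler with the Linux-compat
* variants.
*/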
2835 case FSACTL_SENDFIB:
2836 case FSACTL_SEND_LARGE_FIB:
2837 arg = *(caddr_t*)arg;
2838 case FSACTL_LNX_SENDFIB:
2839 case FSACTL_LNX_SEND_LARGE_FIB:
2840 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2841 error = aac_ioctl_sendfib(sc, arg);
2842 break;
2843 case FSACTL_SEND_RAW_SRB:
2844 arg = *(caddr_t*)arg;
2845 case FSACTL_LNX_SEND_RAW_SRB:
2846 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2847 error = aac_ioctl_send_raw_srb(sc, arg);
2848 break;
2849 case FSACTL_AIF_THREAD:
2850 case FSACTL_LNX_AIF_THREAD:
2851 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2852 error = EINVAL;
2853 break;
2854 case FSACTL_OPEN_GET_ADAPTER_FIB:
2855 arg = *(caddr_t*)arg;
2856 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2857 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2858 error = aac_open_aif(sc, arg);
2859 break;
2860 case FSACTL_GET_NEXT_ADAPTER_FIB:
2861 arg = *(caddr_t*)arg;
2862 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2863 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2864 error = aac_getnext_aif(sc, arg);
2865 break;
2866 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2867 arg = *(caddr_t*)arg;
2868 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2869 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2870 error = aac_close_aif(sc, arg);
2871 break;
2872 case FSACTL_MINIPORT_REV_CHECK:
2873 arg = *(caddr_t*)arg;
2874 case FSACTL_LNX_MINIPORT_REV_CHECK:
2875 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2876 error = aac_rev_check(sc, arg);
2877 break;
2878 case FSACTL_QUERY_DISK:
2879 arg = *(caddr_t*)arg;
2880 case FSACTL_LNX_QUERY_DISK:
2881 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2882 error = aac_query_disk(sc, arg);
2883 break;
2884 case FSACTL_DELETE_DISK:
2885 case FSACTL_LNX_DELETE_DISK:
2886 /*
2887 * We don't trust userland to tell us when to delete a
2888 * container; rather, we rely on an AIF coming from the
2889 * controller.
2890 */
2891 error = 0;
2892 break;
2893 case FSACTL_GET_PCI_INFO:
2894 arg = *(caddr_t*)arg;
2895 case FSACTL_LNX_GET_PCI_INFO:
2896 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2897 error = aac_get_pci_info(sc, arg);
2898 break;
2899 case FSACTL_GET_FEATURES:
2900 arg = *(caddr_t*)arg;
2901 case FSACTL_LNX_GET_FEATURES:
2902 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2903 error = aac_supported_features(sc, arg);
2904 break;
2905 default:
2906 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2907 error = EINVAL;
2908 break;
2909 }
2910 return(error);
2911 }
2912
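/*
* poll(2) support: report the device readable when any registered AIF
* context still has queued entries to deliver.
*/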
2913 static int
2914 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2915 {
2916 struct aac_softc *sc;
2917 struct aac_fib_context *ctx;
2918 int revents;
2919
2920 sc = dev->si_drv1;
2921 revents = 0;
2922
2923 mtx_lock(&sc->aac_aifq_lock);
2924 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2925 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2926 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2927 revents |= poll_events & (POLLIN | POLLRDNORM);
2928 break;
2929 }
2930 }
2931 }
2932 mtx_unlock(&sc->aac_aifq_lock);
2933
2934 if (revents == 0) {
2935 if (poll_events & (POLLIN | POLLRDNORM))
2936 selrecord(td, &sc->rcv_select);
2937 }
2938
2939 return (revents);
2940 }
2941
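/*
* Command-free event callback used by the ioctl paths: retry the command
* allocation and wake the sleeping ioctl once a command becomes available.
*/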
2942 static void
2943 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2944 {
2945
2946 switch (event->ev_type) {
2947 case AAC_EVENT_CMFREE:
2948 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2949 if (aac_alloc_command(sc, (struct aac_command **)arg)) {
2950 aac_add_event(sc, event);
2951 return;
2952 }
2953 free(event, M_AACBUF);
2954 wakeup(arg);
2955 break;
2956 default:
2957 break;
2958 }
2959 }
2960
2961 /*
2962 * Send a FIB supplied from userspace
2963 */
2964 static int
2965 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2966 {
2967 struct aac_command *cm;
2968 int size, error;
2969
2970 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2971
2972 cm = NULL;
2973
2974 /*
2975 * Get a command
2976 */
2977 mtx_lock(&sc->aac_io_lock);
2978 if (aac_alloc_command(sc, &cm)) {
2979 struct aac_event *event;
2980
2981 event = malloc(sizeof(struct aac_event), M_AACBUF,
2982 M_NOWAIT | M_ZERO);
2983 if (event == NULL) {
2984 error = EBUSY;
2985 mtx_unlock(&sc->aac_io_lock);
2986 goto out;
2987 }
2988 event->ev_type = AAC_EVENT_CMFREE;
2989 event->ev_callback = aac_ioctl_event;
2990 event->ev_arg = &cm;
2991 aac_add_event(sc, event);
2992 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
2993 }
2994 mtx_unlock(&sc->aac_io_lock);
2995
2996 /*
2997 * Fetch the FIB header, then re-copy to get data as well.
2998 */
2999 if ((error = copyin(ufib, cm->cm_fib,
3000 sizeof(struct aac_fib_header))) != 0)
3001 goto out;
3002 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
3003 if (size > sc->aac_max_fib_size) {
3004 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
3005 size, sc->aac_max_fib_size);
3006 size = sc->aac_max_fib_size;
3007 }
3008 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
3009 goto out;
3010 cm->cm_fib->Header.Size = size;
3011 cm->cm_timestamp = time_uptime;
3012
3013 /*
3014 * Pass the FIB to the controller, wait for it to complete.
3015 */
3016 mtx_lock(&sc->aac_io_lock);
3017 error = aac_wait_command(cm);
3018 mtx_unlock(&sc->aac_io_lock);
3019 if (error != 0) {
3020 device_printf(sc->aac_dev,
3021 "aac_wait_command return %d\n", error);
3022 goto out;
3023 }
3024
3025 /*
3026 * Copy the FIB and data back out to the caller.
3027 */
3028 size = cm->cm_fib->Header.Size;
3029 if (size > sc->aac_max_fib_size) {
3030 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
3031 size, sc->aac_max_fib_size);
3032 size = sc->aac_max_fib_size;
3033 }
3034 error = copyout(cm->cm_fib, ufib, size);
3035
3036 out:
3037 if (cm != NULL) {
3038 mtx_lock(&sc->aac_io_lock);
3039 aac_release_command(cm);
3040 mtx_unlock(&sc->aac_io_lock);
3041 }
3042 return(error);
3043 }
3044
3045 /*
3046 * Send a passthrough FIB supplied from userspace
3047 */
3048 static int
3049 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
3050 {
3051 struct aac_command *cm;
3052 struct aac_event *event;
3053 struct aac_fib *fib;
3054 struct aac_srb *srbcmd, *user_srb;
3055 struct aac_sg_entry *sge;
3056 void *srb_sg_address, *ureply;
3057 uint32_t fibsize, srb_sg_bytecount;
3058 int error, transfer_data;
3059
3060 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3061
3062 cm = NULL;
3063 transfer_data = 0;
3064 fibsize = 0;
3065 user_srb = (struct aac_srb *)arg;
3066
3067 mtx_lock(&sc->aac_io_lock);
3068 if (aac_alloc_command(sc, &cm)) {
3069 event = malloc(sizeof(struct aac_event), M_AACBUF,
3070 M_NOWAIT | M_ZERO);
3071 if (event == NULL) {
3072 error = EBUSY;
3073 mtx_unlock(&sc->aac_io_lock);
3074 goto out;
3075 }
3076 event->ev_type = AAC_EVENT_CMFREE;
3077 event->ev_callback = aac_ioctl_event;
3078 event->ev_arg = &cm;
3079 aac_add_event(sc, event);
3080 msleep(&cm, &sc->aac_io_lock, 0, "aacraw", 0);
3081 }
3082 mtx_unlock(&sc->aac_io_lock);
3083
3084 cm->cm_data = NULL;
3085 fib = cm->cm_fib;
3086 srbcmd = (struct aac_srb *)fib->data;
3087 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
3088 if (error != 0)
3089 goto out;
3090 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
3091 error = EINVAL;
3092 goto out;
3093 }
3094 error = copyin(user_srb, srbcmd, fibsize);
3095 if (error != 0)
3096 goto out;
3097 srbcmd->function = 0;
3098 srbcmd->retry_limit = 0;
3099 if (srbcmd->sg_map.SgCount > 1) {
3100 error = EINVAL;
3101 goto out;
3102 }
3103
3104 /* Retrieve correct SG entries. */
3105 if (fibsize == (sizeof(struct aac_srb) +
3106 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
3107 struct aac_sg_entry sg;
3108
3109 sge = srbcmd->sg_map.SgEntry;
3110
3111 if ((error = copyin(sge, &sg, sizeof(sg))) != 0)
3112 goto out;
3113
3114 srb_sg_bytecount = sg.SgByteCount;
3115 srb_sg_address = (void *)(uintptr_t)sg.SgAddress;
3116 }
3117 #ifdef __amd64__
3118 else if (fibsize == (sizeof(struct aac_srb) +
3119 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
3120 struct aac_sg_entry64 *sge64;
3121 struct aac_sg_entry64 sg;
3122
3123 sge = NULL;
3124 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
3125
3126 if ((error = copyin(sge64, &sg, sizeof(sg))) != 0)
3127 goto out;
3128
3129 srb_sg_bytecount = sg.SgByteCount;
3130 srb_sg_address = (void *)sg.SgAddress;
3131 if (sg.SgAddress > 0xffffffffull &&
3132 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
3133 error = EINVAL;
3134 goto out;
3135 }
3136 }
3137 #endif
3138 else {
3139 error = EINVAL;
3140 goto out;
3141 }
3142 ureply = (char *)arg + fibsize;
3143 srbcmd->data_len = srb_sg_bytecount;
3144 if (srbcmd->sg_map.SgCount == 1)
3145 transfer_data = 1;
3146
3147 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3148 if (transfer_data) {
3149 cm->cm_datalen = srb_sg_bytecount;
3150 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
3151 if (cm->cm_data == NULL) {
3152 error = ENOMEM;
3153 goto out;
3154 }
3155 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
3156 cm->cm_flags |= AAC_CMD_DATAIN;
3157 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
3158 cm->cm_flags |= AAC_CMD_DATAOUT;
3159 error = copyin(srb_sg_address, cm->cm_data,
3160 cm->cm_datalen);
3161 if (error != 0)
3162 goto out;
3163 }
3164 }
3165
3166 fib->Header.Size = sizeof(struct aac_fib_header) +
3167 sizeof(struct aac_srb);
3168 fib->Header.XferState =
3169 AAC_FIBSTATE_HOSTOWNED |
3170 AAC_FIBSTATE_INITIALISED |
3171 AAC_FIBSTATE_EMPTY |
3172 AAC_FIBSTATE_FROMHOST |
3173 AAC_FIBSTATE_REXPECTED |
3174 AAC_FIBSTATE_NORM |
3175 AAC_FIBSTATE_ASYNC |
3176 AAC_FIBSTATE_FAST_RESPONSE;
3177 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
3178 ScsiPortCommandU64 : ScsiPortCommand;
3179
3180 mtx_lock(&sc->aac_io_lock);
3181 aac_wait_command(cm);
3182 mtx_unlock(&sc->aac_io_lock);
3183
3184 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
3185 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
3186 if (error != 0)
3187 goto out;
3188 }
3189 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
3190 out:
3191 if (cm != NULL) {
3192 if (cm->cm_data != NULL)
3193 free(cm->cm_data, M_AACBUF);
3194 mtx_lock(&sc->aac_io_lock);
3195 aac_release_command(cm);
3196 mtx_unlock(&sc->aac_io_lock);
3197 }
3198 return(error);
3199 }
3200
3201 /*
3202 * cdevpriv interface private destructor.
3203 */
3204 static void
3205 aac_cdevpriv_dtor(void *arg)
3206 {
3207 struct aac_softc *sc;
3208
3209 sc = arg;
3210 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3211 device_unbusy(sc->aac_dev);
3212 }
3213
3214 /*
3215 * Handle an AIF sent to us by the controller; queue it for later reference.
3216 * If the queue fills up, then drop the older entries.
3217 */
3218 static void
3219 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3220 {
3221 struct aac_aif_command *aif;
3222 struct aac_container *co, *co_next;
3223 struct aac_fib_context *ctx;
3224 struct aac_mntinforesp *mir;
3225 int next, current, found;
3226 int count = 0, added = 0, i = 0;
3227 uint32_t channel;
3228
3229 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3230
3231 aif = (struct aac_aif_command*)&fib->data[0];
3232 aac_print_aif(sc, aif);
3233
3234 /* Is it an event that we should care about? */
3235 switch (aif->command) {
3236 case AifCmdEventNotify:
3237 switch (aif->data.EN.type) {
3238 case AifEnAddContainer:
3239 case AifEnDeleteContainer:
3240 /*
3241 * A container was added or deleted, but the message
3242 * doesn't tell us anything else! Re-enumerate the
3243 * containers and sort things out.
3244 */
3245 aac_alloc_sync_fib(sc, &fib);
3246 do {
3247 /*
3248 * Ask the controller for its containers one at
3249 * a time.
3250 * XXX What if the controller's list changes
3251 * midway through this enumeration?
3252 * XXX This should be done async.
3253 */
3254 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
3255 continue;
3256 if (i == 0)
3257 count = mir->MntRespCount;
3258 /*
3259 * Check the container against our list.
3260 * co->co_found was already set to 0 in a
3261 * previous run.
3262 */
3263 if ((mir->Status == ST_OK) &&
3264 (mir->MntTable[0].VolType != CT_NONE)) {
3265 found = 0;
3266 TAILQ_FOREACH(co,
3267 &sc->aac_container_tqh,
3268 co_link) {
3269 if (co->co_mntobj.ObjectId ==
3270 mir->MntTable[0].ObjectId) {
3271 co->co_found = 1;
3272 found = 1;
3273 break;
3274 }
3275 }
3276 /*
3277 * If the container matched, continue
3278 * in the list.
3279 */
3280 if (found) {
3281 i++;
3282 continue;
3283 }
3284
3285 /*
3286 * This is a new container. Do all the
3287 * appropriate things to set it up.
3288 */
3289 aac_add_container(sc, mir, 1);
3290 added = 1;
3291 }
3292 i++;
3293 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3294 aac_release_sync_fib(sc);
3295
3296 /*
3297 * Go through our list of containers and see which ones
3298 * were not marked 'found'. Since the controller didn't
3299 * list them they must have been deleted. Do the
3300 * appropriate steps to destroy the device. Also reset
3301 * the co->co_found field.
3302 */
3303 co = TAILQ_FIRST(&sc->aac_container_tqh);
3304 while (co != NULL) {
3305 if (co->co_found == 0) {
3306 mtx_unlock(&sc->aac_io_lock);
3307 bus_topo_lock();
3308 device_delete_child(sc->aac_dev,
3309 co->co_disk);
3310 bus_topo_unlock();
3311 mtx_lock(&sc->aac_io_lock);
3312 co_next = TAILQ_NEXT(co, co_link);
3313 mtx_lock(&sc->aac_container_lock);
3314 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3315 co_link);
3316 mtx_unlock(&sc->aac_container_lock);
3317 free(co, M_AACBUF);
3318 co = co_next;
3319 } else {
3320 co->co_found = 0;
3321 co = TAILQ_NEXT(co, co_link);
3322 }
3323 }
3324
3325 /* Attach the newly created containers */
3326 if (added) {
3327 mtx_unlock(&sc->aac_io_lock);
3328 bus_topo_lock();
3329 bus_generic_attach(sc->aac_dev);
3330 bus_topo_unlock();
3331 mtx_lock(&sc->aac_io_lock);
3332 }
3333
3334 break;
3335
3336 case AifEnEnclosureManagement:
3337 switch (aif->data.EN.data.EEE.eventType) {
3338 case AIF_EM_DRIVE_INSERTION:
3339 case AIF_EM_DRIVE_REMOVAL:
3340 channel = aif->data.EN.data.EEE.unitID;
3341 if (sc->cam_rescan_cb != NULL)
3342 sc->cam_rescan_cb(sc,
3343 (channel >> 24) & 0xF,
3344 (channel & 0xFFFF));
3345 break;
3346 }
3347 break;
3348
3349 case AifEnAddJBOD:
3350 case AifEnDeleteJBOD:
3351 channel = aif->data.EN.data.ECE.container;
3352 if (sc->cam_rescan_cb != NULL)
3353 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
3354 AAC_CAM_TARGET_WILDCARD);
3355 break;
3356
3357 default:
3358 break;
3359 }
3360
3361 default:
3362 break;
3363 }
3364
3365 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3366 mtx_lock(&sc->aac_aifq_lock);
3367 current = sc->aifq_idx;
3368 next = (current + 1) % AAC_AIFQ_LENGTH;
3369 if (next == 0)
3370 sc->aifq_filled = 1;
3371 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3372 /* modify AIF contexts */
3373 if (sc->aifq_filled) {
3374 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3375 if (next == ctx->ctx_idx)
3376 ctx->ctx_wrap = 1;
3377 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3378 ctx->ctx_idx = next;
3379 }
3380 }
3381 sc->aifq_idx = next;
3382 /* On the off chance that someone is sleeping for an aif... */
3383 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3384 wakeup(sc->aac_aifq);
3385 /* Wakeup any poll()ers */
3386 selwakeuppri(&sc->rcv_select, PRIBIO);
3387 mtx_unlock(&sc->aac_aifq_lock);
3388 }
3389
3390 /*
3391 * Return the Revision of the driver to userspace and check to see if the
3392 * userspace app is possibly compatible. This is extremely bogus since
3393 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3394 * returning what the card reported.
3395 */
3396 static int
3397 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3398 {
3399 struct aac_rev_check rev_check;
3400 struct aac_rev_check_resp rev_check_resp;
3401 int error = 0;
3402
3403 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3404
3405 /*
3406 * Copyin the revision struct from userspace
3407 */
3408 if ((error = copyin(udata, (caddr_t)&rev_check,
3409 sizeof(struct aac_rev_check))) != 0) {
3410 return error;
3411 }
3412
3413 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3414 rev_check.callingRevision.buildNumber);
3415
3416 /*
3417 * Doctor up the response struct.
3418 */
3419 rev_check_resp.possiblyCompatible = 1;
3420 rev_check_resp.adapterSWRevision.external.comp.major =
3421 AAC_DRIVER_MAJOR_VERSION;
3422 rev_check_resp.adapterSWRevision.external.comp.minor =
3423 AAC_DRIVER_MINOR_VERSION;
3424 rev_check_resp.adapterSWRevision.external.comp.type =
3425 AAC_DRIVER_TYPE;
3426 rev_check_resp.adapterSWRevision.external.comp.dash =
3427 AAC_DRIVER_BUGFIX_LEVEL;
3428 rev_check_resp.adapterSWRevision.buildNumber =
3429 AAC_DRIVER_BUILD;
3430
3431 return(copyout((caddr_t)&rev_check_resp, udata,
3432 sizeof(struct aac_rev_check_resp)));
3433 }
3434
3435 /*
3436 * Pass the fib context to the caller
3437 */
3438 static int
3439 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3440 {
3441 struct aac_fib_context *fibctx, *ctx;
3442 int error = 0;
3443
3444 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3445
3446 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
3447 if (fibctx == NULL)
3448 return (ENOMEM);
3449
3450 mtx_lock(&sc->aac_aifq_lock);
3451 /* all elements are already 0, add to queue */
3452 if (sc->fibctx == NULL)
3453 sc->fibctx = fibctx;
3454 else {
3455 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3456 ;
3457 ctx->next = fibctx;
3458 fibctx->prev = ctx;
3459 }
3460
3461 /* evaluate unique value */
3462 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
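/* make sure the value does not collide with an existing context */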
3463 ctx = sc->fibctx;
3464 while (ctx != fibctx) {
3465 if (ctx->unique == fibctx->unique) {
3466 fibctx->unique++;
3467 ctx = sc->fibctx;
3468 } else {
3469 ctx = ctx->next;
3470 }
3471 }
3472 mtx_unlock(&sc->aac_aifq_lock);
3473
3474 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3475 if (error)
3476 aac_close_aif(sc, (caddr_t)ctx);
3477 return error;
3478 }
3479
3480 /*
3481 * Close the caller's fib context
3482 */
3483 static int
3484 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3485 {
3486 struct aac_fib_context *ctx;
3487
3488 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3489
3490 mtx_lock(&sc->aac_aifq_lock);
3491 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3492 if (ctx->unique == *(uint32_t *)&arg) {
3493 if (ctx == sc->fibctx)
3494 sc->fibctx = NULL;
3495 else {
3496 ctx->prev->next = ctx->next;
3497 if (ctx->next)
3498 ctx->next->prev = ctx->prev;
3499 }
3500 break;
3501 }
3502 }
3503 mtx_unlock(&sc->aac_aifq_lock);
3504 if (ctx)
3505 free(ctx, M_AACBUF);
3506
3507 return 0;
3508 }
3509
3510 /*
3511 * Pass the caller the next AIF in their queue
3512 */
3513 static int
3514 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3515 {
3516 struct get_adapter_fib_ioctl agf;
3517 struct aac_fib_context *ctx;
3518 int error;
3519
3520 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3521
3522 #ifdef COMPAT_FREEBSD32
3523 if (SV_CURPROC_FLAG(SV_ILP32)) {
3524 struct get_adapter_fib_ioctl32 agf32;
3525 error = copyin(arg, &agf32, sizeof(agf32));
3526 if (error == 0) {
3527 agf.AdapterFibContext = agf32.AdapterFibContext;
3528 agf.Wait = agf32.Wait;
3529 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3530 }
3531 } else
3532 #endif
3533 error = copyin(arg, &agf, sizeof(agf));
3534 if (error == 0) {
3535 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3536 if (agf.AdapterFibContext == ctx->unique)
3537 break;
3538 }
3539 if (!ctx)
3540 return (EFAULT);
3541
3542 error = aac_return_aif(sc, ctx, agf.AifFib);
3543 if (error == EAGAIN && agf.Wait) {
3544 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3545 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3546 while (error == EAGAIN) {
3547 error = tsleep(sc->aac_aifq, PRIBIO |
3548 PCATCH, "aacaif", 0);
3549 if (error == 0)
3550 error = aac_return_aif(sc, ctx, agf.AifFib);
3551 }
3552 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3553 }
3554 }
3555 return(error);
3556 }
3557
3558 /*
3559 * Hand the next AIF off the top of the queue out to userspace.
3560 */
3561 static int
3562 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3563 {
3564 int current, error;
3565
3566 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3567
3568 mtx_lock(&sc->aac_aifq_lock);
3569 current = ctx->ctx_idx;
3570 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3571 /* empty */
3572 mtx_unlock(&sc->aac_aifq_lock);
3573 return (EAGAIN);
3574 }
3575 error =
3576 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3577 if (error)
3578 device_printf(sc->aac_dev,
3579 "aac_return_aif: copyout returned %d\n", error);
3580 else {
3581 ctx->ctx_wrap = 0;
3582 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3583 }
3584 mtx_unlock(&sc->aac_aifq_lock);
3585 return(error);
3586 }
3587
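/*
* Report the controller's PCI bus and slot numbers to the management app.
*/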
3588 static int
3589 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3590 {
3591 struct aac_pci_info {
3592 u_int32_t bus;
3593 u_int32_t slot;
3594 } pciinf;
3595 int error;
3596
3597 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3598
3599 pciinf.bus = pci_get_bus(sc->aac_dev);
3600 pciinf.slot = pci_get_slot(sc->aac_dev);
3601
3602 error = copyout((caddr_t)&pciinf, uptr,
3603 sizeof(struct aac_pci_info));
3604
3605 return (error);
3606 }
3607
3608 static int
3609 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3610 {
3611 struct aac_features f;
3612 int error;
3613
3614 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3615
3616 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3617 return (error);
3618
3619 /*
3620 * When the management driver issues the FSACTL_GET_FEATURES ioctl
3621 * with featuresState set to all zeroes, the driver returns the
3622 * current state of all supported features; the data field is not
3623 * valid in this case.
3624 * When the ioctl is issued with a specific bit set in featuresState,
3625 * the driver returns the current state of that particular feature,
3626 * along with whatever data is associated with the feature in the
3627 * data field, or performs whatever action the data field
3628 * indicates.
3629 */
3630 if (f.feat.fValue == 0) {
3631 f.feat.fBits.largeLBA =
3632 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3633 /* TODO: In the future, add other features state here as well */
3634 } else {
3635 if (f.feat.fBits.largeLBA)
3636 f.feat.fBits.largeLBA =
3637 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3638 /* TODO: Add other features state and data in the future */
3639 }
3640
3641 error = copyout(&f, uptr, sizeof (f));
3642 return (error);
3643 }
3644
3645 /*
3646 * Give the userland some information about the container. The AAC arch
3647 * expects the driver to be a SCSI passthrough type driver, so it expects
3648 * the containers to have b:t:l numbers. Fake it.
3649 */
3650 static int
3651 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3652 {
3653 struct aac_query_disk query_disk;
3654 struct aac_container *co;
3655 struct aac_disk *disk;
3656 int error, id;
3657
3658 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3659
3660 disk = NULL;
3661
3662 error = copyin(uptr, (caddr_t)&query_disk,
3663 sizeof(struct aac_query_disk));
3664 if (error)
3665 return (error);
3666
3667 id = query_disk.ContainerNumber;
3668 if (id == -1)
3669 return (EINVAL);
3670
3671 mtx_lock(&sc->aac_container_lock);
3672 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3673 if (co->co_mntobj.ObjectId == id)
3674 break;
3675 }
3676
3677 if (co == NULL) {
3678 query_disk.Valid = 0;
3679 query_disk.Locked = 0;
3680 query_disk.Deleted = 1; /* XXX is this right? */
3681 } else {
3682 disk = device_get_softc(co->co_disk);
3683 query_disk.Valid = 1;
3684 query_disk.Locked =
3685 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3686 query_disk.Deleted = 0;
3687 query_disk.Bus = device_get_unit(sc->aac_dev);
3688 query_disk.Target = disk->unit;
3689 query_disk.Lun = 0;
3690 query_disk.UnMapped = 0;
3691 sprintf(&query_disk.diskDeviceName[0], "%s%d",
3692 disk->ad_disk->d_name, disk->ad_disk->d_unit);
3693 }
3694 mtx_unlock(&sc->aac_container_lock);
3695
3696 error = copyout((caddr_t)&query_disk, uptr,
3697 sizeof(struct aac_query_disk));
3698
3699 return (error);
3700 }
3701
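/*
* Query the controller for its SCSI passthrough buses and create an 'aacp'
* child device for each valid bus found.
*/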
3702 static void
3703 aac_get_bus_info(struct aac_softc *sc)
3704 {
3705 struct aac_fib *fib;
3706 struct aac_ctcfg *c_cmd;
3707 struct aac_ctcfg_resp *c_resp;
3708 struct aac_vmioctl *vmi;
3709 struct aac_vmi_businf_resp *vmi_resp;
3710 struct aac_getbusinf businfo;
3711 struct aac_sim *caminf;
3712 device_t child;
3713 int i, found, error;
3714
3715 mtx_lock(&sc->aac_io_lock);
3716 aac_alloc_sync_fib(sc, &fib);
3717 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3718 bzero(c_cmd, sizeof(struct aac_ctcfg));
3719
3720 c_cmd->Command = VM_ContainerConfig;
3721 c_cmd->cmd = CT_GET_SCSI_METHOD;
3722 c_cmd->param = 0;
3723
3724 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3725 sizeof(struct aac_ctcfg));
3726 if (error) {
3727 device_printf(sc->aac_dev, "Error %d sending "
3728 "VM_ContainerConfig command\n", error);
3729 aac_release_sync_fib(sc);
3730 mtx_unlock(&sc->aac_io_lock);
3731 return;
3732 }
3733
3734 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3735 if (c_resp->Status != ST_OK) {
3736 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3737 c_resp->Status);
3738 aac_release_sync_fib(sc);
3739 mtx_unlock(&sc->aac_io_lock);
3740 return;
3741 }
3742
3743 sc->scsi_method_id = c_resp->param;
3744
3745 vmi = (struct aac_vmioctl *)&fib->data[0];
3746 bzero(vmi, sizeof(struct aac_vmioctl));
3747
3748 vmi->Command = VM_Ioctl;
3749 vmi->ObjType = FT_DRIVE;
3750 vmi->MethId = sc->scsi_method_id;
3751 vmi->ObjId = 0;
3752 vmi->IoctlCmd = GetBusInfo;
3753
3754 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3755 sizeof(struct aac_vmi_businf_resp));
3756 if (error) {
3757 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3758 error);
3759 aac_release_sync_fib(sc);
3760 mtx_unlock(&sc->aac_io_lock);
3761 return;
3762 }
3763
3764 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3765 if (vmi_resp->Status != ST_OK) {
3766 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3767 vmi_resp->Status);
3768 aac_release_sync_fib(sc);
3769 mtx_unlock(&sc->aac_io_lock);
3770 return;
3771 }
3772
3773 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3774 aac_release_sync_fib(sc);
3775 mtx_unlock(&sc->aac_io_lock);
3776
3777 found = 0;
3778 for (i = 0; i < businfo.BusCount; i++) {
3779 if (businfo.BusValid[i] != AAC_BUS_VALID)
3780 continue;
3781
3782 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3783 M_AACBUF, M_NOWAIT | M_ZERO);
3784 if (caminf == NULL) {
3785 device_printf(sc->aac_dev,
3786 "No memory to add passthrough bus %d\n", i);
3787 break;
3788 }
3789
3790 child = device_add_child(sc->aac_dev, "aacp", -1);
3791 if (child == NULL) {
3792 device_printf(sc->aac_dev,
3793 "device_add_child failed for passthrough bus %d\n",
3794 i);
3795 free(caminf, M_AACBUF);
3796 break;
3797 }
3798
3799 caminf->TargetsPerBus = businfo.TargetsPerBus;
3800 caminf->BusNumber = i;
3801 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3802 caminf->aac_sc = sc;
3803 caminf->sim_dev = child;
3804
3805 device_set_ivars(child, caminf);
3806 device_set_desc(child, "SCSI Passthrough Bus");
3807 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3808
3809 found = 1;
3810 }
3811
3812 if (found)
3813 bus_generic_attach(sc->aac_dev);
3814 }