1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 /*
33 * AMCC'S 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 */
37
38
39 /*
40 * FreeBSD specific functions not related to CAM, and other
41 * miscellaneous functions.
42 */
43
44
45 #include <dev/twa/tw_osl_includes.h>
46 #include <dev/twa/tw_cl_fwif.h>
47 #include <dev/twa/tw_cl_ioctl.h>
48 #include <dev/twa/tw_osl_ioctl.h>
49
50 #ifdef TW_OSL_DEBUG
51 TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
52 TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
53 #endif /* TW_OSL_DEBUG */
54
55 MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
56
57
/* Entry points for the controller's management/control character device. */
static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

/*
 * Character device switch for /dev/twa<unit>; used by management
 * applications (e.g. 3DM) to submit ioctls to the controller.
 */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

/* Devclass for this driver; filled in by DRIVER_MODULE below. */
static devclass_t	twa_devclass;
71
72
73 /*
74 * Function name: twa_open
75 * Description: Called when the controller is opened.
76 * Simply marks the controller as open.
77 *
78 * Input: dev -- control device corresponding to the ctlr
79 * flags -- mode of open
80 * fmt -- device type (character/block etc.)
81 * proc -- current process
82 * Output: None
83 * Return value: 0 -- success
84 * non-zero-- failure
85 */
86 static TW_INT32
87 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
88 {
89 TW_INT32 unit = minor(dev);
90 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
91
92 tw_osli_dbg_dprintf(5, sc, "entered");
93 sc->state |= TW_OSLI_CTLR_STATE_OPEN;
94 return(0);
95 }
96
97
98
99 /*
100 * Function name: twa_close
101 * Description: Called when the controller is closed.
102 * Simply marks the controller as not open.
103 *
104 * Input: dev -- control device corresponding to the ctlr
105 * flags -- mode of corresponding open
106 * fmt -- device type (character/block etc.)
107 * proc -- current process
108 * Output: None
109 * Return value: 0 -- success
110 * non-zero-- failure
111 */
112 static TW_INT32
113 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
114 {
115 TW_INT32 unit = minor(dev);
116 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
117
118 tw_osli_dbg_dprintf(5, sc, "entered");
119 sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
120 return(0);
121 }
122
123
124
125 /*
126 * Function name: twa_ioctl
127 * Description: Called when an ioctl is posted to the controller.
128 * Handles any OS Layer specific cmds, passes the rest
129 * on to the Common Layer.
130 *
131 * Input: dev -- control device corresponding to the ctlr
132 * cmd -- ioctl cmd
133 * buf -- ptr to buffer in kernel memory, which is
134 * a copy of the input buffer in user-space
135 * flags -- mode of corresponding open
136 * proc -- current process
137 * Output: buf -- ptr to buffer in kernel memory, which will
138 * be copied to the output buffer in user-space
139 * Return value: 0 -- success
140 * non-zero-- failure
141 */
142 static TW_INT32
143 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
144 {
145 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
146 TW_INT32 error;
147
148 tw_osli_dbg_dprintf(5, sc, "entered");
149
150 switch (cmd) {
151 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
152 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
153 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
154 break;
155
156 case TW_OSL_IOCTL_SCAN_BUS:
157 /* Request CAM for a bus scan. */
158 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
159 error = tw_osli_request_bus_scan(sc);
160 break;
161
162 default:
163 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
164 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
165 break;
166 }
167 return(error);
168 }
169
170
171
/* newbus device methods. */
static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
/* busdma lock callback and interrupt handlers. */
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
#ifdef TW_OSLI_DEFERRED_INTR_USED
static TW_VOID	twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
#endif /* TW_OSLI_DEFERRED_INTR_USED */

/* Internal helpers for per-controller memory setup/teardown. */
static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

/* busdma map-load callbacks (with and without S/G data). */
static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


/* Method table binding this driver into the newbus framework. */
static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	{0, 0}
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

/* Register the driver on the pci bus. */
DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
210
211
212
213 /*
214 * Function name: twa_probe
215 * Description: Called at driver load time. Claims 9000 ctlrs.
216 *
217 * Input: dev -- bus device corresponding to the ctlr
218 * Output: None
219 * Return value: <= 0 -- success
220 * > 0 -- failure
221 */
222 static TW_INT32
223 twa_probe(device_t dev)
224 {
225 static TW_UINT8 first_ctlr = 1;
226
227 tw_osli_dbg_printf(3, "entered");
228
229 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
230 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
231 /* Print the driver version only once. */
232 if (first_ctlr) {
233 printf("3ware device driver for 9000 series storage "
234 "controllers, version: %s\n",
235 TW_OSL_DRIVER_VERSION_STRING);
236 first_ctlr = 0;
237 }
238 return(0);
239 }
240 return(ENXIO);
241 }
242
243
244
245 /*
246 * Function name: twa_attach
247 * Description: Allocates pci resources; updates sc; adds a node to the
248 * sysctl tree to expose the driver version; makes calls
249 * (to the Common Layer) to initialize ctlr, and to
250 * attach to CAM.
251 *
252 * Input: dev -- bus device corresponding to the ctlr
253 * Output: None
254 * Return value: 0 -- success
255 * non-zero-- failure
256 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_UINT32		command;
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Link the Common Layer handle back to this OS-layer context. */
	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/*
	 * Initialize the mutexes right here, before any failure path can
	 * reach tw_osli_free_resources().  Both are spin mutexes because
	 * they are taken from interrupt context.
	 */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);

	/* Expose hw.twa<unit>.driver_version via sysctl. */
	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/*
	 * Make sure we are going to be able to talk to this board.
	 * NOTE(review): this tests PCIM_CMD_PORTEN although the BAR
	 * allocated below is SYS_RES_MEMORY -- confirm whether
	 * PCIM_CMD_MEMEN was intended.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2001,
			"Register window not available",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/*
	 * Allocate the PCI register window.  The Common Layer knows which
	 * BAR each device id uses.
	 */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
		== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1,
		RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	/*
	 * On MP systems the handler is registered MPSAFE (and FAST when the
	 * deferred-interrupt path is compiled in, since real completion work
	 * then happens in a taskqueue).
	 */
	if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
		((mp_ncpus > 1) ? (INTR_MPSAFE
#ifdef TW_OSLI_DEFERRED_INTR_USED
		| INTR_FAST
#endif /* TW_OSLI_DEFERRED_INTR_USED */
		) : 0) | INTR_TYPE_CAM,
		twa_pci_intr, sc, &sc->intr_handle))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

#ifdef TW_OSLI_DEFERRED_INTR_USED
	/* Task that twa_pci_intr enqueues to do the deferred work. */
	TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
#endif /* TW_OSLI_DEFERRED_INTR_USED */

	/* Allocate all DMA/non-DMA memory the OSL and CL need. */
	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
		TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
		sc->non_dma_mem, sc->dma_mem,
		sc->dma_mem_phys
#ifdef TW_OSL_FLASH_FIRMWARE
		, sc->flash_dma_mem, sc->flash_dma_mem_phys
#endif /* TW_OSL_FLASH_FIRMWARE */
		))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

#ifdef TW_OSL_FLASH_FIRMWARE
	/*
	 * Free any memory allocated for firmware flashing -- it was only
	 * needed during tw_cl_init_ctlr() above.
	 */
	if (sc->flash_dma_mem) {
		bus_dmamap_unload(sc->flash_tag, sc->flash_map);
		bus_dmamem_free(sc->flash_tag, sc->flash_dma_mem,
			sc->flash_map);
	}
	if (sc->flash_tag)
		bus_dma_tag_destroy(sc->flash_tag);
	/*
	 * Set flash_tag and flash_dma_mem to 0, so we don't try freeing them
	 * again, later.
	 */
	sc->flash_tag = 0;
	sc->flash_dma_mem = 0;
#endif /* TW_OSL_FLASH_FIRMWARE */

	/* Create the control device (/dev/twa<unit>). */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	/* Stash the softc so the cdev entry points can find it. */
	sc->ctrl_dev->si_drv1 = sc;

	/* Finally, register with CAM so the units show up as da(4) disks. */
	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	return(0);
}
444
445
446
447 /*
448 * Function name: tw_osli_alloc_mem
449 * Description: Allocates memory needed both by CL and OSL.
450 *
451 * Input: sc -- OSL internal controller context
452 * Output: None
453 * Return value: 0 -- success
454 * non-zero-- failure
455 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
#ifdef TW_OSL_FLASH_FIRMWARE
	TW_UINT32			flash_dma_mem_size;
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	TW_UINT32			per_req_dma_mem_size;
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Advertise 64-bit capability flags based on the platform's types. */
	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
#ifdef TW_OSL_FLASH_FIRMWARE
	sc->flags |= TW_CL_FLASH_FIRMWARE;
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSLI_DEFERRED_INTR_USED
	sc->flags |= TW_CL_DEFERRED_INTR_USED;
#endif /* TW_OSLI_DEFERRED_INTR_USED */

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	/* Ask the Common Layer how much memory it needs, and its alignment. */
	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
		sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
		&(sc->alignment), &(sc->sg_size_factor),
		&non_dma_mem_size, &dma_mem_size
#ifdef TW_OSL_FLASH_FIRMWARE
		, &flash_dma_mem_size
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		, &per_req_dma_mem_size
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	/*
	 * Ordinary (non-DMA) memory for the Common Layer.
	 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so this
	 * check looks redundant -- confirm before removing it.
	 */
	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
		M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag; all other tags derive from it. */
	if (bus_dma_tag_create(NULL,			/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,			/* maxsize */
		max_sg_elements,			/* nsegments */
		TW_CL_MAX_IO_SIZE,			/* maxsegsize */
		0,					/* flags */
		NULL,					/* lockfunc */
		NULL,					/* lockfuncarg */
		&sc->parent_tag			/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for Common Layer's DMA'able memory (dma_mem).
	 * One contiguous segment; when per-request DMA memory is compiled in,
	 * room for all requests is reserved up front.
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		(TW_OSLI_MAX_NUM_IOS *
		per_req_dma_mem_size) +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		dma_mem_size,				/* maxsize */
		1,					/* nsegments */
		BUS_SPACE_MAXSIZE,			/* maxsegsize */
		0,					/* flags */
		NULL,					/* lockfunc */
		NULL,					/* lockfuncarg */
		&sc->cmd_tag				/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	/* Record the bus address of dma_mem in sc->dma_mem_phys. */
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);


#ifdef TW_OSL_FLASH_FIRMWARE
	/*
	 * Create a dma tag for Common Layer's DMA'able memory,
	 * used to flash firmware (flash_dma_mem).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
		flash_dma_mem_size,			/* maxsize */
		1,					/* nsegments */
		flash_dma_mem_size,			/* maxsegsize */
		0,					/* flags */
		NULL,					/* lockfunc */
		NULL,					/* lockfuncarg */
		&sc->flash_tag				/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200D,
			"Can't allocate DMA tag for Common Layer's "
			"firmware flash memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->flash_tag, &sc->flash_dma_mem,
		BUS_DMA_NOWAIT, &sc->flash_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200E,
			"Can't allocate DMA'able memory for Common Layer's "
			"firmware flash",
			ENOMEM);
		return(ENOMEM);
	}

	/* Record the bus address of the flash buffer. */
	bus_dmamap_load(sc->flash_tag, sc->flash_map, sc->flash_dma_mem,
		flash_dma_mem_size, twa_map_load_callback,
		&sc->flash_dma_mem_phys, 0);

#endif /* TW_OSL_FLASH_FIRMWARE */

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).  These tags use the busdma lock
	 * callback since mappings happen from I/O paths.
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,			/* maxsize */
		max_sg_elements,			/* nsegments */
		TW_CL_MAX_IO_SIZE,			/* maxsegsize */
		BUS_DMA_ALLOCNOW,			/* flags */
		twa_busdma_lock,			/* lockfunc */
		sc->io_lock,				/* lockfuncarg */
		&sc->dma_tag				/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,			/* maxsize */
		max_sg_elements,			/* nsegments */
		TW_CL_MAX_IO_SIZE,			/* maxsegsize */
		BUS_DMA_ALLOCNOW,			/* flags */
		twa_busdma_lock,			/* lockfunc */
		sc->io_lock,				/* lockfuncarg */
		&sc->ioctl_tag				/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create just one map for all ioctl request data buffers -- ioctls
	 * are serialized, so a single map suffices.
	 */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	/*
	 * Allocate the request-context array.
	 * NOTE(review): M_WAITOK, so the NULL check is likely redundant
	 * (see above); also M_ZERO would subsume the bzero() below.
	 */
	if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
		malloc((sizeof(struct tw_osli_req_context) *
			TW_OSLI_MAX_NUM_IOS),
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctxt_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);

	/* Give each request a dma map and park it on the free queue. */
	for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
		req = &(sc->req_ctxt_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

		/* Carve this request's slice out of the big DMA block. */
		req->req_pkt.dma_mem = ((TW_INT8 *)(sc->dma_mem)) +
			(i * per_req_dma_mem_size);
		req->req_pkt.dma_mem_phys = sc->dma_mem_phys +
			(i * per_req_dma_mem_size);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Advance dma_mem/dma_mem_phys past the per-request slices, so what
	 * remains is the Common Layer's own DMA area.
	 */
	sc->dma_mem = ((TW_INT8 *)(sc->dma_mem)) +
		(TW_OSLI_MAX_NUM_IOS * per_req_dma_mem_size);
	sc->dma_mem_phys += (TW_OSLI_MAX_NUM_IOS * per_req_dma_mem_size);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	return(0);
}
754
755
756
757 /*
758 * Function name: tw_osli_free_resources
759 * Description: Performs clean-up at the time of going down.
760 *
761 * Input: sc -- ptr to OSL internal ctlr context
762 * Output: None
763 * Return value: None
764 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/* Destroy the per-request dma maps (all requests are on the free q
	 * by the time we get here). */
	if (sc->req_ctxt_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL)
			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctxt_buf)
		free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);


#ifdef TW_OSL_FLASH_FIRMWARE

	/* twa_attach zeroes these after a successful init, so this only
	 * fires when attach failed before/during firmware flash. */
	if (sc->flash_dma_mem) {
		/* In case this piece of memory has already been freed. */
		bus_dmamap_unload(sc->flash_tag, sc->flash_map);
		bus_dmamem_free(sc->flash_tag, sc->flash_dma_mem,
			sc->flash_map);
	}
	if (sc->flash_tag)
		if ((error = bus_dma_tag_destroy(sc->flash_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(flash) returned %d", error);

#endif /* TW_OSL_FLASH_FIRMWARE */

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if (sc->intr_handle)
		if ((error = bus_teardown_intr(sc->bus_dev,
				sc->irq_res, sc->intr_handle)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	/*
	 * NOTE(review): io_lock and q_lock are mtx_init'd in twa_attach but
	 * never mtx_destroy'd here -- confirm whether that leaks witness/lock
	 * state on detach.
	 */
	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
870
871
872
873 /*
874 * Function name: twa_detach
875 * Description: Called when the controller is being detached from
876 * the pci bus.
877 *
878 * Input: dev -- bus device corresponding to the ctlr
879 * Output: None
880 * Return value: 0 -- success
881 * non-zero-- failure
882 */
883 static TW_INT32
884 twa_detach(device_t dev)
885 {
886 struct twa_softc *sc = device_get_softc(dev);
887 TW_INT32 error;
888
889 tw_osli_dbg_dprintf(3, sc, "entered");
890
891 error = EBUSY;
892 if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
893 tw_osli_printf(sc, "error = %d",
894 TW_CL_SEVERITY_ERROR_STRING,
895 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
896 0x2014,
897 "Device open",
898 error);
899 goto out;
900 }
901
902 /* Shut the controller down. */
903 if ((error = twa_shutdown(dev)))
904 goto out;
905
906 /* Free all resources associated with this controller. */
907 tw_osli_free_resources(sc);
908 error = 0;
909
910 out:
911 return(error);
912 }
913
914
915
916 /*
917 * Function name: twa_shutdown
918 * Description: Called at unload/shutdown time. Lets the controller
919 * know that we are going down.
920 *
921 * Input: dev -- bus device corresponding to the ctlr
922 * Output: None
923 * Return value: 0 -- success
924 * non-zero-- failure
925 */
926 static TW_INT32
927 twa_shutdown(device_t dev)
928 {
929 struct twa_softc *sc = device_get_softc(dev);
930 TW_INT32 error = 0;
931
932 tw_osli_dbg_dprintf(3, sc, "entered");
933
934 /* Disconnect from the controller. */
935 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
936 tw_osli_printf(sc, "error = %d",
937 TW_CL_SEVERITY_ERROR_STRING,
938 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
939 0x2015,
940 "Failed to shutdown Common Layer/controller",
941 error);
942 }
943 return(error);
944 }
945
946
947
948 /*
949 * Function name: twa_busdma_lock
950 * Description: Function to provide synchronization during busdma_swi.
951 *
952 * Input: lock_arg -- lock mutex sent as argument
953 * op -- operation (lock/unlock) expected of the function
954 * Output: None
955 * Return value: None
956 */
957 TW_VOID
958 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
959 {
960 struct mtx *lock;
961
962 lock = (struct mtx *)lock_arg;
963 switch (op) {
964 case BUS_DMA_LOCK:
965 mtx_lock_spin(lock);
966 break;
967
968 case BUS_DMA_UNLOCK:
969 mtx_unlock_spin(lock);
970 break;
971
972 default:
973 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
974 }
975 }
976
977
978
979 /*
980 * Function name: twa_pci_intr
981 * Description: Interrupt handler. Wrapper for twa_interrupt.
982 *
983 * Input: arg -- ptr to OSL internal ctlr context
984 * Output: None
985 * Return value: None
986 */
987 static TW_VOID
988 twa_pci_intr(TW_VOID *arg)
989 {
990 struct twa_softc *sc = (struct twa_softc *)arg;
991
992 tw_osli_dbg_dprintf(10, sc, "entered");
993 if (tw_cl_interrupt(&(sc->ctlr_handle)))
994 #ifdef TW_OSLI_DEFERRED_INTR_USED
995 taskqueue_enqueue_fast(taskqueue_fast,
996 &(sc->deferred_intr_callback));
997 #else /* TW_OSLI_DEFERRED_INTR_USED */
998 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
999 #endif /* TW_OSLI_DEFERRED_INTR_USED */
1000 }
1001
1002
1003
1004 #ifdef TW_OSLI_DEFERRED_INTR_USED
1005
1006 /*
1007 * Function name: twa_deferred_intr
1008 * Description: Deferred interrupt handler.
1009 *
1010 * Input: context -- ptr to OSL internal ctlr context
1011 * pending -- not used
1012 * Output: None
1013 * Return value: None
1014 */
1015 static TW_VOID
1016 twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
1017 {
1018 struct twa_softc *sc = (struct twa_softc *)context;
1019
1020 tw_osli_dbg_dprintf(10, sc, "entered");
1021
1022 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
1023 }
1024
1025 #endif /* TW_OSLI_DEFERRED_INTR_USED */
1026
1027
1028
1029 /*
1030 * Function name: tw_osli_fw_passthru
1031 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
1032 *
1033 * Input: sc -- ptr to OSL internal ctlr context
1034 * buf -- ptr to ioctl pkt understood by CL
1035 * Output: None
1036 * Return value: 0 -- success
1037 * non-zero-- failure
1038 */
/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *			Sleeps (tsleep) until the request completes, times
 *			out, or is interrupted; on timeout the controller is
 *			reset and the request must NOT be touched afterwards.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure (errno or CL status code)
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60; /* seconds */
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	/* No free request packets available; ask the caller to retry. */
	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	/* Completion will be delivered via tw_osl_complete_passthru. */
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.  Round the user's buffer length up
	 * to sc->sg_size_factor (assumed to be a power of 2 — the mask
	 * trick below relies on that).
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		/*
		 * NOTE(review): malloc(9) with M_WAITOK should never return
		 * NULL, so this failure branch looks like dead (defensive)
		 * code — confirm before relying on it.
		 */
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	/* The cmd pkt comes straight from the user's ioctl packet. */
	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	/* Map and submit; on EINPROGRESS this returns 0 (see map code). */
	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/*
	 * Sleep until the completion callback wakes us, we time out, or
	 * a signal arrives.  The SLEEPING flag is how the callback knows
	 * whether a wakeup is needed; it clears the flag before waking us.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);

		/*
		 * If the callback already cleared SLEEPING, the request
		 * completed — treat any tsleep error (timeout/signal that
		 * raced with completion) as success.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			/* Woken up normally; pick the first error found. */
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		/* CL reported a failure while we were asleep. */
		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2018,
				"Passthru request timed out!",
				req);
			/*
			 * Should I check here if the timeout happened
			 * because of yet another reset, and not do a
			 * second reset?
			 */
			tw_cl_reset_ctlr(&sc->ctlr_handle);
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will already have been
			 * freed by the callback.  Just return.
			 */
			user_buf->driver_pkt.os_status = error;
			return(ETIMEDOUT);
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal.  Calculate the new timeout, in case it was the latter.
		 * NOTE(review): if we were woken after end_time has passed,
		 * this subtraction can yield 0 or wrap (TW_UINT32), making
		 * the next tsleep wait forever or far too long — verify.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	}

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:
	/*
	 * Print the failure message.  For some reason, on certain OS versions,
	 * printing this error message during reset hangs the display (although
	 * the rest of the system is running fine.  So, don't print it if the
	 * failure was due to a reset.
	 */
	if ((error) && (error != TW_CL_ERR_REQ_BUS_RESET))
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201A,
			"Firmware passthru failed!",
			error);

	/* Report the final status back to the ioctl caller. */
	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
1198
1199
1200
1201 /*
1202 * Function name: tw_osl_complete_passthru
1203 * Description: Called to complete passthru requests.
1204 *
1205 * Input: req_handle -- ptr to request handle
1206 * Output: None
1207 * Return value: None
1208 */
1209 TW_VOID
1210 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1211 {
1212 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1213 struct twa_softc *sc = req->ctlr;
1214
1215 tw_osli_dbg_dprintf(5, sc, "entered");
1216
1217 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1218 tw_osli_printf(sc, "request = %p, status = %d",
1219 TW_CL_SEVERITY_ERROR_STRING,
1220 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1221 0x201B,
1222 "Unposted command completed!!",
1223 req, req->state);
1224 }
1225
1226 /*
1227 * Remove request from the busy queue. Just mark it complete.
1228 * There's no need to move it into the complete queue as we are
1229 * going to be done with it right now.
1230 */
1231 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1232 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1233
1234 tw_osli_unmap_request(req);
1235
1236 /*
1237 * Don't do a wake up if there was an error even before the request
1238 * was sent down to the Common Layer, and we hadn't gotten an
1239 * EINPROGRESS. The request originator will then be returned an
1240 * error, and he can do the clean-up.
1241 */
1242 if ((req->error_code) &&
1243 (!(req->state & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1244 return;
1245
1246 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1247 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1248 /* Wake up the sleeping command originator. */
1249 tw_osli_dbg_dprintf(5, sc,
1250 "Waking up originator of request %p", req);
1251 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1252 wakeup_one(req);
1253 } else {
1254 /*
1255 * If the request completed even before tsleep
1256 * was called, simply return.
1257 */
1258 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1259 return;
1260
1261 tw_osli_printf(sc, "request = %p",
1262 TW_CL_SEVERITY_ERROR_STRING,
1263 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1264 0x201C,
1265 "Passthru callback called, "
1266 "and caller not sleeping",
1267 req);
1268 }
1269 } else {
1270 tw_osli_printf(sc, "request = %p",
1271 TW_CL_SEVERITY_ERROR_STRING,
1272 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1273 0x201D,
1274 "Passthru callback called for non-passthru request",
1275 req);
1276 }
1277 }
1278
1279
1280
1281 /*
1282 * Function name: tw_osli_get_request
1283 * Description: Gets a request pkt from the free queue.
1284 *
1285 * Input: sc -- ptr to OSL internal ctlr context
1286 * Output: None
1287 * Return value: ptr to request pkt -- success
1288 * NULL -- failure
1289 */
1290 struct tw_osli_req_context *
1291 tw_osli_get_request(struct twa_softc *sc)
1292 {
1293 struct tw_osli_req_context *req;
1294
1295 tw_osli_dbg_dprintf(4, sc, "entered");
1296
1297 /* Get a free request packet. */
1298 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1299
1300 /* Initialize some fields to their defaults. */
1301 if (req) {
1302 req->req_handle.osl_req_ctxt = NULL;
1303 req->req_handle.cl_req_ctxt = NULL;
1304 req->data = NULL;
1305 req->length = 0;
1306 req->real_data = NULL;
1307 req->real_length = 0;
1308 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1309 req->flags = 0;
1310 req->error_code = 0;
1311 req->orig_req = NULL;
1312
1313 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
1314
1315 /* Don't zero dma_mem & dma_mem_phys in req_pkt. */
1316 req->req_pkt.cmd = 0;
1317 req->req_pkt.flags = 0;
1318 req->req_pkt.status = 0;
1319 req->req_pkt.tw_osl_callback = NULL;
1320 bzero(&(req->req_pkt.gen_req_pkt),
1321 sizeof(req->req_pkt.gen_req_pkt));
1322
1323 #else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
1324
1325 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1326
1327 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
1328 }
1329 return(req);
1330 }
1331
1332
1333
1334 /*
1335 * Function name: twa_map_load_data_callback
1336 * Description: Callback of bus_dmamap_load for the buffer associated
1337 * with data. Updates the cmd pkt (size/sgl_entries
1338 * fields, as applicable) to reflect the number of sg
1339 * elements.
1340 *
1341 * Input: arg -- ptr to OSL internal request context
1342 * segs -- ptr to a list of segment descriptors
1343 * nsegments--# of segments
1344 * error -- 0 if no errors encountered before callback,
1345 * non-zero if errors were encountered
1346 * Output: None
1347 * Return value: None
1348 */
/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements, syncs the map, and submits the request
 *			to the Common Layer.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
	/*
	 * If the load was deferred (EINPROGRESS) and new requests were
	 * disallowed in tw_osli_map_request, re-enable them now.
	 */
	if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
		tw_osli_allow_new_requests(sc, (TW_VOID *)(req->orig_req));

	/* Too many segments for the controller's SG list: fail the req. */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		/* Sync before handing the buffer to the hardware. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		/*
		 * sgl_entries was pre-set to 1 by the submitter; adjust it
		 * to the actual segment count returned by busdma.
		 */
		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		/* Non-passthru (CAM I/O) path: per-request dma map. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		/* Submission failed: complete the request with an error. */
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1436
1437
1438
1439 /*
1440 * Function name: twa_map_load_callback
1441 * Description: Callback of bus_dmamap_load for the buffer associated
1442 * with a cmd pkt.
1443 *
1444 * Input: arg -- ptr to variable to hold phys addr
1445 * segs -- ptr to a list of segment descriptors
1446 * nsegments--# of segments
1447 * error -- 0 if no errors encountered before callback,
1448 * non-zero if errors were encountered
1449 * Output: None
1450 * Return value: None
1451 */
1452 static TW_VOID
1453 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1454 TW_INT32 nsegments, TW_INT32 error)
1455 {
1456 *((bus_addr_t *)arg) = segs[0].ds_addr;
1457 }
1458
1459
1460
1461 /*
1462 * Function name: tw_osli_map_request
1463 * Description: Maps a cmd pkt and data associated with it, into
1464 * DMA'able memory.
1465 *
1466 * Input: req -- ptr to request pkt
1467 * Output: None
1468 * Return value: 0 -- success
1469 * non-zero-- failure
1470 */
/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory, and submits it to the Common Layer
 *			(directly, or via twa_map_load_data_callback).
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	0	-- success (including deferred EINPROGRESS)
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			/* Use a bounce buffer; keep the originals around. */
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round length up to a sg_size_factor multiple. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			/* M_NOWAIT: this can run in interrupt context. */
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}

		/*
		 * Map the data buffer into bus space and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}

		if (!error)
			/*
			 * Load succeeded synchronously; the callback has
			 * already run and recorded any submission error.
			 */
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) {
					/*
					 * Callback hasn't run yet; stall new
					 * requests until it does.
					 */
					req->flags |=
						TW_OSLI_REQ_FLAGS_IN_PROGRESS;
					tw_osli_disallow_new_requests(sc);
				}
				mtx_unlock_spin(sc->io_lock);
				/* Deferred load is not an error to callers. */
				error = 0;
			} else {
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* No data: submit the command to the CL directly. */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			/* Deliver the failure through the completion path. */
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1589
1590
1591
1592 /*
1593 * Function name: tw_osli_unmap_request
1594 * Description: Undoes the mapping done by tw_osli_map_request.
1595 *
1596 * Input: req -- ptr to request pkt
1597 * Output: None
1598 * Return value: None
1599 */
/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request:
 *			syncs and unloads the dma map, copies data back
 *			from the bounce buffer (if one was used), and
 *			frees the bounce buffer.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				/* Sync before the CPU reads the data. */
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			mtx_unlock_spin(sc->io_lock);
		} else {
			/* CAM I/O path: per-request dma map, no lock needed. */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
1662
1663
1664
1665 #ifdef TW_OSL_DEBUG
1666
1667 TW_VOID twa_report_stats(TW_VOID);
1668 TW_VOID twa_reset_stats(TW_VOID);
1669 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1670 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1671
1672
1673 /*
1674 * Function name: twa_report_stats
1675 * Description: For being called from ddb. Calls functions that print
1676 * OSL and CL internal stats for the controller.
1677 *
1678 * Input: None
1679 * Output: None
1680 * Return value: None
1681 */
1682 TW_VOID
1683 twa_report_stats(TW_VOID)
1684 {
1685 struct twa_softc *sc;
1686 TW_INT32 i;
1687
1688 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1689 tw_osli_print_ctlr_stats(sc);
1690 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1691 }
1692 }
1693
1694
1695
1696 /*
1697 * Function name: tw_osli_print_ctlr_stats
1698 * Description: For being called from ddb. Prints OSL controller stats
1699 *
1700 * Input: sc -- ptr to OSL internal controller context
1701 * Output: None
1702 * Return value: None
1703 */
/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller
 *			stats: current and maximum lengths of the free and
 *			busy request queues.
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	/* Column header for the per-queue rows below. */
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d     %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d     %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
1716
1717
1718
1719 /*
1720 * Function name: twa_print_req_info
1721 * Description: For being called from ddb. Calls functions that print
1722 * OSL and CL internal details for the request.
1723 *
1724 * Input: req -- ptr to OSL internal request context
1725 * Output: None
1726 * Return value: None
1727 */
/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	twa_printf(sc, "OSL details for request:\n");
	/* Dump all OSL-side fields of the request in one shot. */
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	/* Let the Common Layer print its side of the request too. */
	tw_cl_print_req_info(&(req->req_handle));
}
1744
1745
1746
1747 /*
1748 * Function name: twa_reset_stats
1749 * Description: For being called from ddb.
1750 * Resets some OSL controller stats.
1751 *
1752 * Input: None
1753 * Output: None
1754 * Return value: None
1755 */
1756 TW_VOID
1757 twa_reset_stats(TW_VOID)
1758 {
1759 struct twa_softc *sc;
1760 TW_INT32 i;
1761
1762 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1763 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1764 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1765 tw_cl_reset_stats(&sc->ctlr_handle);
1766 }
1767 }
1768
1769 #endif /* TW_OSL_DEBUG */
Cache object: 0709b7afd1bb18e9c07c6d1c27bde17f
|