1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/6.2/sys/dev/twa/tw_osl_freebsd.c 165449 2006-12-21 21:00:52Z mjacob $
30 */
31
32 /*
33 * AMCC'S 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 */
37
38
39 /*
40 * FreeBSD specific functions not related to CAM, and other
41 * miscellaneous functions.
42 */
43
44
45 #include <dev/twa/tw_osl_includes.h>
46 #include <dev/twa/tw_cl_fwif.h>
47 #include <dev/twa/tw_cl_ioctl.h>
48 #include <dev/twa/tw_osl_ioctl.h>
49
#ifdef TW_OSL_DEBUG
/* Runtime-tunable debug verbosity for the OS Layer and the Common Layer. */
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

/* malloc(9) type under which all of this driver's allocations are accounted. */
MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
56
57
/* Entry points for the controller's character (control) device. */
static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

/* Character-device switch table; the device itself is created in twa_attach(). */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

/* Devclass handle; referenced by DRIVER_MODULE() below. */
static devclass_t	twa_devclass;
71
72
73 /*
74 * Function name: twa_open
75 * Description: Called when the controller is opened.
76 * Simply marks the controller as open.
77 *
78 * Input: dev -- control device corresponding to the ctlr
79 * flags -- mode of open
80 * fmt -- device type (character/block etc.)
81 * proc -- current process
82 * Output: None
83 * Return value: 0 -- success
84 * non-zero-- failure
85 */
86 static TW_INT32
87 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
88 {
89 TW_INT32 unit = minor(dev);
90 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
91
92 tw_osli_dbg_dprintf(5, sc, "entered");
93 sc->state |= TW_OSLI_CTLR_STATE_OPEN;
94 return(0);
95 }
96
97
98
99 /*
100 * Function name: twa_close
101 * Description: Called when the controller is closed.
102 * Simply marks the controller as not open.
103 *
104 * Input: dev -- control device corresponding to the ctlr
105 * flags -- mode of corresponding open
106 * fmt -- device type (character/block etc.)
107 * proc -- current process
108 * Output: None
109 * Return value: 0 -- success
110 * non-zero-- failure
111 */
112 static TW_INT32
113 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
114 {
115 TW_INT32 unit = minor(dev);
116 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
117
118 tw_osli_dbg_dprintf(5, sc, "entered");
119 sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
120 return(0);
121 }
122
123
124
125 /*
126 * Function name: twa_ioctl
127 * Description: Called when an ioctl is posted to the controller.
128 * Handles any OS Layer specific cmds, passes the rest
129 * on to the Common Layer.
130 *
131 * Input: dev -- control device corresponding to the ctlr
132 * cmd -- ioctl cmd
133 * buf -- ptr to buffer in kernel memory, which is
134 * a copy of the input buffer in user-space
135 * flags -- mode of corresponding open
136 * proc -- current process
137 * Output: buf -- ptr to buffer in kernel memory, which will
138 * be copied to the output buffer in user-space
139 * Return value: 0 -- success
140 * non-zero-- failure
141 */
142 static TW_INT32
143 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
144 {
145 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
146 TW_INT32 error;
147
148 tw_osli_dbg_dprintf(5, sc, "entered");
149
150 switch (cmd) {
151 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
152 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
153 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
154 break;
155
156 case TW_OSL_IOCTL_SCAN_BUS:
157 /* Request CAM for a bus scan. */
158 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
159 error = tw_osli_request_bus_scan(sc);
160 break;
161
162 default:
163 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
164 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
165 break;
166 }
167 return(error);
168 }
169
170
171
/* newbus device interface entry points. */
static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
/* busdma lock callback handed to bus_dma_tag_create() in tw_osli_alloc_mem(). */
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
/* Interrupt handler registered via bus_setup_intr() in twa_attach(). */
static TW_VOID	twa_pci_intr(TW_VOID *arg);
#ifdef TW_OSLI_DEFERRED_INTR_USED
static TW_VOID	twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
#endif /* TW_OSLI_DEFERRED_INTR_USED */

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

/* busdma map-load callbacks (definitions live elsewhere in the OSL). */
static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


/* Method dispatch table for the PCI driver. */
static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	{0, 0}	/* terminator */
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)	/* per-device softc size */
};

/* Register this driver with the PCI bus. */
DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);

/* Module dependencies: PCI bus code and the CAM layer. */
MODULE_DEPEND(twa, pci, 1, 1, 1);
MODULE_DEPEND(twa, cam, 1, 1, 1);
213
214
215 /*
216 * Function name: twa_probe
217 * Description: Called at driver load time. Claims 9000 ctlrs.
218 *
219 * Input: dev -- bus device corresponding to the ctlr
220 * Output: None
221 * Return value: <= 0 -- success
222 * > 0 -- failure
223 */
224 static TW_INT32
225 twa_probe(device_t dev)
226 {
227 static TW_UINT8 first_ctlr = 1;
228
229 tw_osli_dbg_printf(3, "entered");
230
231 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
232 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
233 /* Print the driver version only once. */
234 if (first_ctlr) {
235 printf("3ware device driver for 9000 series storage "
236 "controllers, version: %s\n",
237 TW_OSL_DRIVER_VERSION_STRING);
238 first_ctlr = 0;
239 }
240 return(0);
241 }
242 return(ENXIO);
243 }
244
245
246
/*
 * Function name:	twa_attach
 * Description:	Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.  On any failure past mutex/sysctl
 *			setup, resources acquired so far are released via
 *			tw_osli_free_resources() before returning.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_UINT32		command;
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Let the Common Layer find its way back to this softc. */
	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/*
	 * Initialize the mutexes right here.  io_lock is later handed to
	 * busdma as the lock argument for the data-buffer tags (see
	 * tw_osli_alloc_mem()).
	 */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);

	/* Expose hw.twaN.driver_version via sysctl. */
	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		/*
		 * NOTE(review): this early return does not call
		 * tw_osli_free_resources(); confirm nothing allocated above
		 * (mutexes, sysctl ctx) needs explicit teardown here.
		 */
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/*
	 * Make sure we are going to be able to talk to this board.
	 * NOTE(review): this tests the I/O-port enable bit, but the register
	 * window below is allocated as SYS_RES_MEMORY -- confirm whether
	 * PCIM_CMD_MEMEN was intended here.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2001,
			"Register window not available",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/*
	 * Allocate the PCI register window.  The Common Layer tells us which
	 * BAR to use for this device id.
	 */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
		== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	/* Cache the bus-space handles for register access. */
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1,
		RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	/*
	 * On MP systems the handler is registered MPSAFE (and, when deferred
	 * interrupts are compiled in, as a fast handler that punts work to a
	 * taskqueue -- see twa_pci_intr()).
	 */
	if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
		((mp_ncpus > 1) ? (INTR_MPSAFE
#ifdef TW_OSLI_DEFERRED_INTR_USED
		| INTR_FAST
#endif /* TW_OSLI_DEFERRED_INTR_USED */
		) : 0) | INTR_TYPE_CAM,
		twa_pci_intr, sc, &sc->intr_handle))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

#ifdef TW_OSLI_DEFERRED_INTR_USED
	/* Task that twa_pci_intr() enqueues for deferred processing. */
	TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
#endif /* TW_OSLI_DEFERRED_INTR_USED */

	/* Allocate DMA'able and ordinary memory for OSL and CL. */
	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
		TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
		sc->non_dma_mem, sc->dma_mem,
		sc->dma_mem_phys
#ifdef TW_OSL_FLASH_FIRMWARE
		, sc->flash_dma_mem, sc->flash_dma_mem_phys
#endif /* TW_OSL_FLASH_FIRMWARE */
		))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

#ifdef TW_OSL_FLASH_FIRMWARE
	/*
	 * Free any memory allocated for firmware flashing; it is only needed
	 * during tw_cl_init_ctlr() above.
	 */
	if (sc->flash_dma_mem) {
		bus_dmamap_unload(sc->flash_tag, sc->flash_map);
		bus_dmamem_free(sc->flash_tag, sc->flash_dma_mem,
			sc->flash_map);
	}
	if (sc->flash_tag)
		bus_dma_tag_destroy(sc->flash_tag);
	/*
	 * Set flash_tag and flash_dma_mem to 0, so we don't try freeing them
	 * again, later (tw_osli_free_resources() checks both).
	 */
	sc->flash_tag = 0;
	sc->flash_dma_mem = 0;
#endif /* TW_OSL_FLASH_FIRMWARE */

	/* Create the control device and link it back to this softc. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	/* Finally, attach to CAM so the disks show up. */
	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	return(0);
}
446
447
448
/*
 * Function name:	tw_osli_alloc_mem
 * Description:	Allocates memory needed both by CL and OSL.  On error,
 *			partially-acquired resources are left in the softc for
 *			the caller (twa_attach) to release via
 *			tw_osli_free_resources().
 *
 * Input:		sc	-- OSL internal controller context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
#ifdef TW_OSL_FLASH_FIRMWARE
	TW_UINT32			flash_dma_mem_size;
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	TW_UINT32			per_req_dma_mem_size;
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Advertise 64-bit capabilities to the CL based on pointer widths. */
	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
#ifdef TW_OSL_FLASH_FIRMWARE
	sc->flags |= TW_CL_FLASH_FIRMWARE;
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSLI_DEFERRED_INTR_USED
	sc->flags |= TW_CL_DEFERRED_INTR_USED;
#endif /* TW_OSLI_DEFERRED_INTR_USED */

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	/* Ask the Common Layer how much memory it needs, and how aligned. */
	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
		sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
		&(sc->alignment), &(sc->sg_size_factor),
		&non_dma_mem_size, &dma_mem_size
#ifdef TW_OSL_FLASH_FIRMWARE
		, &flash_dma_mem_size
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		, &per_req_dma_mem_size
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	/*
	 * Ordinary (non-DMA) memory for the CL.  With M_WAITOK, malloc(9)
	 * sleeps rather than returning NULL; the check is defensive.
	 */
	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
		M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag; all other tags derive from it. */
	if (bus_dma_tag_create(NULL,			/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,			/* maxsize */
		max_sg_elements,			/* nsegments */
		TW_CL_MAX_IO_SIZE,			/* maxsegsize */
		0,					/* flags */
		NULL,					/* lockfunc */
		NULL,					/* lockfuncarg */
		&sc->parent_tag			/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for Common Layer's DMA'able memory (dma_mem).
	 * When per-request DMA memory is compiled in, the one contiguous
	 * allocation also carries TW_OSLI_MAX_NUM_IOS per-request slices
	 * (carved out in the loop below).  One segment: the memory must be
	 * physically contiguous.
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
		sc->alignment,				/* alignment */
		0,					/* boundary */
		BUS_SPACE_MAXADDR,			/* lowaddr */
		BUS_SPACE_MAXADDR, 			/* highaddr */
		NULL, NULL, 				/* filter, filterarg */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		(TW_OSLI_MAX_NUM_IOS *
		per_req_dma_mem_size) +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		dma_mem_size,				/* maxsize */
		1,					/* nsegments */
		BUS_SPACE_MAXSIZE,			/* maxsegsize */
		0,					/* flags */
		NULL,					/* lockfunc */
		NULL,					/* lockfuncarg */
		&sc->cmd_tag				/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	/*
	 * Load the map; twa_map_load_callback stores the bus address into
	 * dma_mem_phys.  NOTE(review): the load is assumed to complete
	 * synchronously (single-segment coherent memory) -- confirm.
	 */
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);


#ifdef TW_OSL_FLASH_FIRMWARE
	/*
	 * Create a dma tag for Common Layer's DMA'able memory,
	 * used to flash firmware (flash_dma_mem).  This memory is freed
	 * again by twa_attach() right after tw_cl_init_ctlr() succeeds.
	 */
	if (bus_dma_tag_create(sc->parent_tag,	/* parent */
		sc->alignment,			/* alignment */
		0,				/* boundary */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR, 		/* highaddr */
		NULL, NULL, 			/* filter, filterarg */
		flash_dma_mem_size,		/* maxsize */
		1,				/* nsegments */
		flash_dma_mem_size,		/* maxsegsize */
		0,				/* flags */
		NULL,				/* lockfunc */
		NULL,				/* lockfuncarg */
		&sc->flash_tag			/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200D,
			"Can't allocate DMA tag for Common Layer's "
			"firmware flash memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->flash_tag, &sc->flash_dma_mem,
		BUS_DMA_NOWAIT, &sc->flash_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200E,
			"Can't allocate DMA'able memory for Common Layer's "
			"firmware flash",
			ENOMEM);
		return(ENOMEM);
	}

	/* As above, the callback records the bus address. */
	bus_dmamap_load(sc->flash_tag, sc->flash_map, sc->flash_dma_mem,
		flash_dma_mem_size, twa_map_load_callback,
		&sc->flash_dma_mem_phys, 0);

#endif /* TW_OSL_FLASH_FIRMWARE */

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).  twa_busdma_lock/io_lock serialize
	 * deferred map loads (busdma_swi).
	 */
	if (bus_dma_tag_create(sc->parent_tag,	/* parent */
		sc->alignment,			/* alignment */
		0,				/* boundary */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR, 		/* highaddr */
		NULL, NULL, 			/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,		/* maxsize */
		max_sg_elements,		/* nsegments */
		TW_CL_MAX_IO_SIZE,		/* maxsegsize */
		BUS_DMA_ALLOCNOW,		/* flags */
		twa_busdma_lock,		/* lockfunc */
		sc->io_lock,			/* lockfuncarg */
		&sc->dma_tag			/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,	/* parent */
		sc->alignment,			/* alignment */
		0,				/* boundary */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR, 		/* highaddr */
		NULL, NULL, 			/* filter, filterarg */
		TW_CL_MAX_IO_SIZE,		/* maxsize */
		max_sg_elements,		/* nsegments */
		TW_CL_MAX_IO_SIZE,		/* maxsegsize */
		BUS_DMA_ALLOCNOW,		/* flags */
		twa_busdma_lock,		/* lockfunc */
		sc->io_lock,			/* lockfuncarg */
		&sc->ioctl_tag			/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	/* One request context per possible outstanding I/O. */
	if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
		malloc((sizeof(struct tw_osli_req_context) *
			TW_OSLI_MAX_NUM_IOS),
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctxt_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);

	for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
		req = &(sc->req_ctxt_buf[i]);
		req->ctlr = sc;
		/* Per-request map for mapping this request's data buffer. */
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

		/* Carve this request's slice out of the cmd_tag allocation. */
		req->req_pkt.dma_mem = ((TW_INT8 *)(sc->dma_mem)) +
			(i * per_req_dma_mem_size);
		req->req_pkt.dma_mem_phys = sc->dma_mem_phys +
			(i * per_req_dma_mem_size);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Skip past the per-request slices so dma_mem/dma_mem_phys now point
	 * at the region handed to tw_cl_init_ctlr().
	 */
	sc->dma_mem = ((TW_INT8 *)(sc->dma_mem)) +
		(TW_OSLI_MAX_NUM_IOS * per_req_dma_mem_size);
	sc->dma_mem_phys += (TW_OSLI_MAX_NUM_IOS * per_req_dma_mem_size);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	return(0);
}
756
757
758
/*
 * Function name:	tw_osli_free_resources
 * Description:	Performs clean-up at the time of going down.  Also called
 *			from twa_attach() failure paths, so every step checks
 *			whether the resource was ever acquired.
 *
 *			NOTE(review): io_lock/q_lock initialized in
 *			twa_attach() are not mtx_destroy()'d here -- confirm
 *			whether that is intentional.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/* Destroy the per-request dma maps (only if requests were created). */
	if (sc->req_ctxt_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL)
			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctxt_buf)
		free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload and free the Common Layer's DMA'able memory. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);


#ifdef TW_OSL_FLASH_FIRMWARE

	/*
	 * flash_dma_mem/flash_tag are zeroed by twa_attach() after a
	 * successful init, so these fire only on early failure.
	 */
	if (sc->flash_dma_mem) {
		/* In case this piece of memory has already been freed. */
		bus_dmamap_unload(sc->flash_tag, sc->flash_map);
		bus_dmamem_free(sc->flash_tag, sc->flash_dma_mem,
			sc->flash_map);
	}
	if (sc->flash_tag)
		if ((error = bus_dma_tag_destroy(sc->flash_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(flash) returned %d", error);

#endif /* TW_OSL_FLASH_FIRMWARE */

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	/* The parent tag goes last, after all derived tags. */
	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if (sc->intr_handle)
		if ((error = bus_teardown_intr(sc->bus_dev,
				sc->irq_res, sc->intr_handle)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	/* Tear down the sysctl nodes added in twa_attach(). */
	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
872
873
874
875 /*
876 * Function name: twa_detach
877 * Description: Called when the controller is being detached from
878 * the pci bus.
879 *
880 * Input: dev -- bus device corresponding to the ctlr
881 * Output: None
882 * Return value: 0 -- success
883 * non-zero-- failure
884 */
885 static TW_INT32
886 twa_detach(device_t dev)
887 {
888 struct twa_softc *sc = device_get_softc(dev);
889 TW_INT32 error;
890
891 tw_osli_dbg_dprintf(3, sc, "entered");
892
893 error = EBUSY;
894 if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
895 tw_osli_printf(sc, "error = %d",
896 TW_CL_SEVERITY_ERROR_STRING,
897 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
898 0x2014,
899 "Device open",
900 error);
901 goto out;
902 }
903
904 /* Shut the controller down. */
905 if ((error = twa_shutdown(dev)))
906 goto out;
907
908 /* Free all resources associated with this controller. */
909 tw_osli_free_resources(sc);
910 error = 0;
911
912 out:
913 return(error);
914 }
915
916
917
918 /*
919 * Function name: twa_shutdown
920 * Description: Called at unload/shutdown time. Lets the controller
921 * know that we are going down.
922 *
923 * Input: dev -- bus device corresponding to the ctlr
924 * Output: None
925 * Return value: 0 -- success
926 * non-zero-- failure
927 */
928 static TW_INT32
929 twa_shutdown(device_t dev)
930 {
931 struct twa_softc *sc = device_get_softc(dev);
932 TW_INT32 error = 0;
933
934 tw_osli_dbg_dprintf(3, sc, "entered");
935
936 /* Disconnect from the controller. */
937 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
938 tw_osli_printf(sc, "error = %d",
939 TW_CL_SEVERITY_ERROR_STRING,
940 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
941 0x2015,
942 "Failed to shutdown Common Layer/controller",
943 error);
944 }
945 return(error);
946 }
947
948
949
950 /*
951 * Function name: twa_busdma_lock
952 * Description: Function to provide synchronization during busdma_swi.
953 *
954 * Input: lock_arg -- lock mutex sent as argument
955 * op -- operation (lock/unlock) expected of the function
956 * Output: None
957 * Return value: None
958 */
959 TW_VOID
960 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
961 {
962 struct mtx *lock;
963
964 lock = (struct mtx *)lock_arg;
965 switch (op) {
966 case BUS_DMA_LOCK:
967 mtx_lock_spin(lock);
968 break;
969
970 case BUS_DMA_UNLOCK:
971 mtx_unlock_spin(lock);
972 break;
973
974 default:
975 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
976 }
977 }
978
979
980
/*
 * Function name:	twa_pci_intr
 * Description:	Interrupt handler.  Asks the Common Layer whether work
 *			is pending and, if so, either defers it to a fast
 *			taskqueue or completes it inline, depending on
 *			TW_OSLI_DEFERRED_INTR_USED.
 *
 * Input:		arg	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_pci_intr(TW_VOID *arg)
{
	struct twa_softc	*sc = (struct twa_softc *)arg;

	tw_osli_dbg_dprintf(10, sc, "entered");
	/* Non-zero return from the CL means further processing is needed. */
	if (tw_cl_interrupt(&(sc->ctlr_handle)))
#ifdef TW_OSLI_DEFERRED_INTR_USED
		/* Punt the remaining work to twa_deferred_intr(). */
		taskqueue_enqueue_fast(taskqueue_fast,
			&(sc->deferred_intr_callback));
#else /* TW_OSLI_DEFERRED_INTR_USED */
		/* Complete processing right here. */
		tw_cl_deferred_interrupt(&(sc->ctlr_handle));
#endif /* TW_OSLI_DEFERRED_INTR_USED */
}
1003
1004
1005
1006 #ifdef TW_OSLI_DEFERRED_INTR_USED
1007
1008 /*
1009 * Function name: twa_deferred_intr
1010 * Description: Deferred interrupt handler.
1011 *
1012 * Input: context -- ptr to OSL internal ctlr context
1013 * pending -- not used
1014 * Output: None
1015 * Return value: None
1016 */
1017 static TW_VOID
1018 twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
1019 {
1020 struct twa_softc *sc = (struct twa_softc *)context;
1021
1022 tw_osli_dbg_dprintf(10, sc, "entered");
1023
1024 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
1025 }
1026
1027 #endif /* TW_OSLI_DEFERRED_INTR_USED */
1028
1029
1030
/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *			Sleeps until the request completes, times out, or the
 *			controller is reset.  Payload (if any) is bounced
 *			through a kernel buffer via copyin/copyout.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME		end_time;
	TW_UINT32	timeout = 60;	/* completion wait, in seconds */
	TW_UINT32	data_buf_size_adjusted;
	struct tw_cl_req_packet		*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32	error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	/* Grab a free OSL request packet; EBUSY if the free queue is empty. */
	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.  (Round-up assumes sg_size_factor
	 * is a power of 2 -- TODO confirm.)
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	/* Assignment intended: non-zero rounded length means a payload. */
	if ((req->length = data_buf_size_adjusted)) {
		/*
		 * NOTE(review): with M_WAITOK, malloc(9) does not return
		 * NULL; this check is purely defensive.
		 */
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload (from user space). */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		/*
		 * Transfer direction is not known here, so map the buffer
		 * for both directions.
		 */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	/* Map the payload into DMA'able memory and submit to the CL. */
	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/* Sleep until the completion callback fires, or we time out. */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);

		/*
		 * If the callback cleared SLEEPING, it completed the request
		 * and issued the wakeup; disregard any tsleep error from a
		 * lost race with that wakeup.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			/*
			 * Woken normally: fail on any recorded OSL error,
			 * an unexpected request state, or a CL status.
			 */
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2018,
				"Passthru request timed out!",
				req);
			/*
			 * Should I check here if the timeout happened
			 * because of yet another reset, and not do a
			 * second reset?
			 */
			tw_cl_reset_ctlr(&sc->ctlr_handle);
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will already have been
			 * freed by the callback.  Just return.
			 */
			user_buf->driver_pkt.os_status = error;
			return(ETIMEDOUT);
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	}

	/* If there was a payload, copy it back (to user space). */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:
	/*
	 * Print the failure message. For some reason, on certain OS versions,
	 * printing this error message during reset hangs the display (although
	 * the rest of the system is running fine. So, don't print it if the
	 * failure was due to a reset.
	 */
	if ((error) && (error != TW_CL_ERR_REQ_BUS_RESET))
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201A,
			"Firmware passthru failed!",
			error);

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
1200
1201
1202
1203 /*
1204 * Function name: tw_osl_complete_passthru
1205 * Description: Called to complete passthru requests.
1206 *
1207 * Input: req_handle -- ptr to request handle
1208 * Output: None
1209 * Return value: None
1210 */
1211 TW_VOID
1212 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1213 {
1214 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1215 struct twa_softc *sc = req->ctlr;
1216
1217 tw_osli_dbg_dprintf(5, sc, "entered");
1218
1219 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1220 tw_osli_printf(sc, "request = %p, status = %d",
1221 TW_CL_SEVERITY_ERROR_STRING,
1222 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1223 0x201B,
1224 "Unposted command completed!!",
1225 req, req->state);
1226 }
1227
1228 /*
1229 * Remove request from the busy queue. Just mark it complete.
1230 * There's no need to move it into the complete queue as we are
1231 * going to be done with it right now.
1232 */
1233 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1234 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1235
1236 tw_osli_unmap_request(req);
1237
1238 /*
1239 * Don't do a wake up if there was an error even before the request
1240 * was sent down to the Common Layer, and we hadn't gotten an
1241 * EINPROGRESS. The request originator will then be returned an
1242 * error, and he can do the clean-up.
1243 */
1244 if ((req->error_code) &&
1245 (!(req->state & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1246 return;
1247
1248 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1249 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1250 /* Wake up the sleeping command originator. */
1251 tw_osli_dbg_dprintf(5, sc,
1252 "Waking up originator of request %p", req);
1253 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1254 wakeup_one(req);
1255 } else {
1256 /*
1257 * If the request completed even before tsleep
1258 * was called, simply return.
1259 */
1260 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1261 return;
1262
1263 tw_osli_printf(sc, "request = %p",
1264 TW_CL_SEVERITY_ERROR_STRING,
1265 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1266 0x201C,
1267 "Passthru callback called, "
1268 "and caller not sleeping",
1269 req);
1270 }
1271 } else {
1272 tw_osli_printf(sc, "request = %p",
1273 TW_CL_SEVERITY_ERROR_STRING,
1274 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1275 0x201D,
1276 "Passthru callback called for non-passthru request",
1277 req);
1278 }
1279 }
1280
1281
1282
1283 /*
1284 * Function name: tw_osli_get_request
1285 * Description: Gets a request pkt from the free queue.
1286 *
1287 * Input: sc -- ptr to OSL internal ctlr context
1288 * Output: None
1289 * Return value: ptr to request pkt -- success
1290 * NULL -- failure
1291 */
1292 struct tw_osli_req_context *
1293 tw_osli_get_request(struct twa_softc *sc)
1294 {
1295 struct tw_osli_req_context *req;
1296
1297 tw_osli_dbg_dprintf(4, sc, "entered");
1298
1299 /* Get a free request packet. */
1300 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1301
1302 /* Initialize some fields to their defaults. */
1303 if (req) {
1304 req->req_handle.osl_req_ctxt = NULL;
1305 req->req_handle.cl_req_ctxt = NULL;
1306 req->data = NULL;
1307 req->length = 0;
1308 req->real_data = NULL;
1309 req->real_length = 0;
1310 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1311 req->flags = 0;
1312 req->error_code = 0;
1313 req->orig_req = NULL;
1314
1315 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
1316
1317 /* Don't zero dma_mem & dma_mem_phys in req_pkt. */
1318 req->req_pkt.cmd = 0;
1319 req->req_pkt.flags = 0;
1320 req->req_pkt.status = 0;
1321 req->req_pkt.tw_osl_callback = NULL;
1322 bzero(&(req->req_pkt.gen_req_pkt),
1323 sizeof(req->req_pkt.gen_req_pkt));
1324
1325 #else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
1326
1327 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1328
1329 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
1330 }
1331 return(req);
1332 }
1333
1334
1335
/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements, performs the PRE dmamap syncs, and submits
 *			the request to the Common Layer.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
	/*
	 * If the load had been deferred (EINPROGRESS), new requests were
	 * being held off; let them flow again now that we're mapped.
	 */
	if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
		tw_osli_allow_new_requests(sc, (TW_VOID *)(req->orig_req));

	/* Too many segments for the tag: record the error and bail. */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		/* Hand the sg list to the CL and submit the passthru cmd. */
		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		/* sgl_entries was pre-set to 1; adjust to actual count. */
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		/* Hand the sg list to the CL and start the SCSI I/O. */
		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		/* Report the failure through the completion callback. */
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1438
1439
1440
1441 /*
1442 * Function name: twa_map_load_callback
1443 * Description: Callback of bus_dmamap_load for the buffer associated
1444 * with a cmd pkt.
1445 *
1446 * Input: arg -- ptr to variable to hold phys addr
1447 * segs -- ptr to a list of segment descriptors
1448 * nsegments--# of segments
1449 * error -- 0 if no errors encountered before callback,
1450 * non-zero if errors were encountered
1451 * Output: None
1452 * Return value: None
1453 */
1454 static TW_VOID
1455 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1456 TW_INT32 nsegments, TW_INT32 error)
1457 {
1458 *((bus_addr_t *)arg) = segs[0].ds_addr;
1459 }
1460
1461
1462
/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory, bouncing through an aligned buffer
 *			when necessary, and submits it to the Common Layer.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round length up to the next sg_size_factor. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}

		/*
		 * Map the data buffer into bus space and build the SG list.
		 * The actual submission happens in the load callback
		 * (twa_map_load_data_callback), possibly deferred.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}

		if (!error)
			/* Callback already ran; pick up any error it set. */
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Load was deferred; the callback will run
				 * later.  Hold off new requests until then.
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) {
					req->flags |=
						TW_OSLI_REQ_FLAGS_IN_PROGRESS;
					tw_osli_disallow_new_requests(sc);
				}
				mtx_unlock_spin(sc->io_lock);
				error = 0;
			} else {
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* No payload: submit to the Common Layer directly. */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			/* Report the failure via the completion callback. */
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1591
1592
1593
/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request:
 *			performs the POST dmamap syncs, copies data back from
 *			the bounce buffer if one was used, unloads the map,
 *			and frees the bounce buffer.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			mtx_unlock_spin(sc->io_lock);
		} else {
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
1664
1665
1666
1667 #ifdef TW_OSL_DEBUG
1668
1669 TW_VOID twa_report_stats(TW_VOID);
1670 TW_VOID twa_reset_stats(TW_VOID);
1671 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1672 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1673
1674
1675 /*
1676 * Function name: twa_report_stats
1677 * Description: For being called from ddb. Calls functions that print
1678 * OSL and CL internal stats for the controller.
1679 *
1680 * Input: None
1681 * Output: None
1682 * Return value: None
1683 */
1684 TW_VOID
1685 twa_report_stats(TW_VOID)
1686 {
1687 struct twa_softc *sc;
1688 TW_INT32 i;
1689
1690 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1691 tw_osli_print_ctlr_stats(sc);
1692 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1693 }
1694 }
1695
1696
1697
/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller stats
 *			(current and maximum lengths of the free and busy
 *			request queues).
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d     %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d     %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
1718
1719
1720
/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Prints OSL internal
 *			details for the request, then calls into the Common
 *			Layer to print its view of the same request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	tw_cl_print_req_info(&(req->req_handle));
}
1746
1747
1748
1749 /*
1750 * Function name: twa_reset_stats
1751 * Description: For being called from ddb.
1752 * Resets some OSL controller stats.
1753 *
1754 * Input: None
1755 * Output: None
1756 * Return value: None
1757 */
1758 TW_VOID
1759 twa_reset_stats(TW_VOID)
1760 {
1761 struct twa_softc *sc;
1762 TW_INT32 i;
1763
1764 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1765 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1766 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1767 tw_cl_reset_stats(&sc->ctlr_handle);
1768 }
1769 }
1770
1771 #endif /* TW_OSL_DEBUG */
Cache object: b35d052150dc3635f9ef39373e39e2ce
|