1 /*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/7.4/sys/dev/twa/tw_osl_freebsd.c 212678 2010-09-15 17:25:51Z delphij $
30 */
31
32 /*
33 * AMCC'S 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 * Modifications by: Adam Radford
37 * Modifications by: Manjunath Ranganathaiah
38 */
39
40
41 /*
42 * FreeBSD specific functions not related to CAM, and other
43 * miscellaneous functions.
44 */
45
46
47 #include <dev/twa/tw_osl_includes.h>
48 #include <dev/twa/tw_cl_fwif.h>
49 #include <dev/twa/tw_cl_ioctl.h>
50 #include <dev/twa/tw_osl_ioctl.h>
51
#ifdef TW_OSL_DEBUG
/*
 * Runtime debug verbosity knobs, compiled in only when TW_OSL_DEBUG is
 * defined.  One level governs OS Layer (this file) diagnostics, the other
 * the Common Layer's.  Both default to the build-time TW_OSL_DEBUG value.
 */
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

/* Malloc type under which all twa driver memory is accounted. */
MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
58
59
/* Entry points for the per-controller control device (see make_dev in attach). */
static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

/*
 * Character device switch for the "twa%d" control node; only open/close/ioctl
 * are provided — all data-path I/O goes through CAM, not this device.
 */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

/* Devclass for all twa controllers; populated by DRIVER_MODULE below. */
static devclass_t	twa_devclass;
73
74
75 /*
76 * Function name: twa_open
77 * Description: Called when the controller is opened.
78 * Simply marks the controller as open.
79 *
80 * Input: dev -- control device corresponding to the ctlr
81 * flags -- mode of open
82 * fmt -- device type (character/block etc.)
83 * proc -- current process
84 * Output: None
85 * Return value: 0 -- success
86 * non-zero-- failure
87 */
88 static TW_INT32
89 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
90 {
91 TW_INT32 unit = minor(dev);
92 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
93
94 tw_osli_dbg_dprintf(5, sc, "entered");
95 sc->open = TW_CL_TRUE;
96 return(0);
97 }
98
99
100
101 /*
102 * Function name: twa_close
103 * Description: Called when the controller is closed.
104 * Simply marks the controller as not open.
105 *
106 * Input: dev -- control device corresponding to the ctlr
107 * flags -- mode of corresponding open
108 * fmt -- device type (character/block etc.)
109 * proc -- current process
110 * Output: None
111 * Return value: 0 -- success
112 * non-zero-- failure
113 */
114 static TW_INT32
115 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
116 {
117 TW_INT32 unit = minor(dev);
118 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
119
120 tw_osli_dbg_dprintf(5, sc, "entered");
121 sc->open = TW_CL_FALSE;
122 return(0);
123 }
124
125
126
127 /*
128 * Function name: twa_ioctl
129 * Description: Called when an ioctl is posted to the controller.
130 * Handles any OS Layer specific cmds, passes the rest
131 * on to the Common Layer.
132 *
133 * Input: dev -- control device corresponding to the ctlr
134 * cmd -- ioctl cmd
135 * buf -- ptr to buffer in kernel memory, which is
136 * a copy of the input buffer in user-space
137 * flags -- mode of corresponding open
138 * proc -- current process
139 * Output: buf -- ptr to buffer in kernel memory, which will
140 * be copied to the output buffer in user-space
141 * Return value: 0 -- success
142 * non-zero-- failure
143 */
144 static TW_INT32
145 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
146 {
147 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
148 TW_INT32 error;
149
150 tw_osli_dbg_dprintf(5, sc, "entered");
151
152 switch (cmd) {
153 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
154 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
155 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
156 break;
157
158 case TW_OSL_IOCTL_SCAN_BUS:
159 /* Request CAM for a bus scan. */
160 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
161 error = tw_osli_request_bus_scan(sc);
162 break;
163
164 default:
165 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
166 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
167 break;
168 }
169 return(error);
170 }
171
172
173
/* newbus device interface methods. */
static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
/* busdma lock callback and PCI interrupt handler. */
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
/* Periodic request-timeout / controller-reset watchdog. */
static TW_VOID	twa_watchdog(TW_VOID *arg);
/* Interrupt hookup/teardown helpers, also used from other OSL files. */
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

/* One-time memory/DMA-tag setup and matching teardown. */
static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

/* busdma map-load callbacks (data buffers / single-segment cmd memory). */
static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
191
192
/* newbus method table binding the driver entry points above. */
static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	{0, 0}
};

/* Driver description: name, methods, and per-device softc size. */
static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

/* Register on the pci bus; depends on CAM for the SCSI transport. */
DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);
214
215
216 /*
217 * Function name: twa_probe
218 * Description: Called at driver load time. Claims 9000 ctlrs.
219 *
220 * Input: dev -- bus device corresponding to the ctlr
221 * Output: None
222 * Return value: <= 0 -- success
223 * > 0 -- failure
224 */
225 static TW_INT32
226 twa_probe(device_t dev)
227 {
228 static TW_UINT8 first_ctlr = 1;
229
230 tw_osli_dbg_printf(3, "entered");
231
232 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
233 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
234 /* Print the driver version only once. */
235 if (first_ctlr) {
236 printf("3ware device driver for 9000 series storage "
237 "controllers, version: %s\n",
238 TW_OSL_DRIVER_VERSION_STRING);
239 first_ctlr = 0;
240 }
241 return(0);
242 }
243 return(ENXIO);
244 }
245
246 int twa_setup_intr(struct twa_softc *sc)
247 {
248 int error = 0;
249
250 if (!(sc->intr_handle) && (sc->irq_res)) {
251 error = bus_setup_intr(sc->bus_dev, sc->irq_res,
252 INTR_TYPE_CAM | INTR_MPSAFE,
253 NULL, twa_pci_intr,
254 sc, &sc->intr_handle);
255 }
256 return( error );
257 }
258
259
260 int twa_teardown_intr(struct twa_softc *sc)
261 {
262 int error = 0;
263
264 if ((sc->intr_handle) && (sc->irq_res)) {
265 error = bus_teardown_intr(sc->bus_dev,
266 sc->irq_res, sc->intr_handle);
267 sc->intr_handle = NULL;
268 }
269 return( error );
270 }
271
272
273
274 /*
275 * Function name: twa_attach
276 * Description: Allocates pci resources; updates sc; adds a node to the
277 * sysctl tree to expose the driver version; makes calls
278 * (to the Common Layer) to initialize ctlr, and to
279 * attach to CAM.
280 *
281 * Input: dev -- bus device corresponding to the ctlr
282 * Output: None
283 * Return value: 0 -- success
284 * non-zero-- failure
285 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_UINT32		command;
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Link the OSL context into the Common Layer's controller handle. */
	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/*
	 * Initialize the mutexes right here.  io/q locks are spin mutexes
	 * (taken from interrupt context); the SIM lock is a default sleepable
	 * mutex, recursive because CAM can call back into the driver.
	 */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);

	/* Expose hw.twaN with a driver_version string node. */
	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/*
	 * Make sure we are going to be able to talk to this board.
	 * NOTE(review): the register window allocated below is a memory BAR
	 * (TW_CL_BAR_TYPE_MEM / SYS_RES_MEMORY), yet this tests
	 * PCIM_CMD_PORTEN (I/O space enable); PCIM_CMD_MEMEN looks like the
	 * relevant bit — confirm against the hardware docs before changing.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2001,
			"Register window not available",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/*
	 * Allocate the PCI register window.  The Common Layer tells us
	 * which BAR to use for this device ID.
	 */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
		== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1,
		RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* DMA tags, command memory, and request-packet pool. */
	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/*
	 * Create the control device; stash the softc in si_drv1 so the
	 * cdevsw entry points can retrieve it.
	 */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	/*
	 * Two callout slots are alternated by twa_watchdog (via
	 * watchdog_index parity); kick off the first run 5 seconds out.
	 */
	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
	callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}
450
451
/*
 * Periodic watchdog: scans all busy requests for expired deadlines, flags a
 * controller reset if one is found, performs the reset when flagged, and
 * reschedules itself (70s after a reset, otherwise every 5s) while the
 * driver is active.  Runs as a callout; 'arg' is the CL controller handle.
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	/* Sentinel 1234 means "callout_reset was never called" in the debug print. */
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	/* Look for any busy request whose deadline has passed. */
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		/* deadline == 0 means "no timeout armed" for this request. */
		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			/* One timed-out request is enough; stop scanning. */
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef    TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	/* Alternate between the two callout slots on successive runs. */
	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		/* Reschedule well past the reset before actually resetting. */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		/* Normal case: run again in 5 seconds. */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef    TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
		"driver_is_active = %d, my_watchdog_was_pending = %d\n",
		i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}
513
514
515 /*
516 * Function name: tw_osli_alloc_mem
517 * Description: Allocates memory needed both by CL and OSL.
518 *
519 * Input: sc -- OSL internal controller context
520 * Output: None
521 * Return value: 0 -- success
522 * non-zero-- failure
523 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Tell the CL whether this platform has 64-bit DMA addresses/lengths. */
	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	/* Ask the CL how much DMA'able and regular memory it needs. */
	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	/*
	 * NOTE(review): with M_WAITOK, malloc(9) sleeps until it succeeds and
	 * does not return NULL, so this NULL check (and the one for
	 * req_ctx_buf below) appears to be dead code — harmless, but confirm
	 * before relying on the error path.
	 */
	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag; all other tags derive from it. */
	if (bus_dma_tag_create(NULL,			/* parent */
				sc->alignment,		/* alignment */
				TW_OSLI_DMA_BOUNDARY,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for Common Layer's DMA'able memory (dma_mem).
	 * Single segment: the CL needs this region physically contiguous.
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	/*
	 * Load the map; the callback records the bus address in
	 * dma_mem_phys.  With a single segment the load completes
	 * synchronously here.
	 */
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	/* Allocate the pool of per-request OSL contexts. */
	if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
			malloc((sizeof(struct tw_osli_req_context) *
				TW_OSLI_MAX_NUM_REQUESTS),
				TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctx_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);

	/* Give each request a dma map and an ioctl-sleep mutex, then free-list it. */
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/ timeout mutex */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}
740
741
742
743 /*
744 * Function name: tw_osli_free_resources
745 * Description: Performs clean-up at the time of going down.
746 *
747 * Input: sc -- ptr to OSL internal ctlr context
748 * Output: None
749 * Return value: None
750 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/*
	 * Teardown proceeds in roughly the reverse of attach order.  Each
	 * step is guarded so this is safe to call from a partially
	 * completed twa_attach; errors are logged (debug level 1) but do
	 * not abort the remaining cleanup.
	 */

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/* Per-request mutexes and dma maps, drained from the free queue. */
	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload and free the CL's contiguous command memory. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	/* The parent tag goes last, after all derived tags. */
	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
		tw_osli_dbg_dprintf(1, sc,
			"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	/* Tear down the hw.twaN sysctl subtree. */
	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
841
842
843
844 /*
845 * Function name: twa_detach
846 * Description: Called when the controller is being detached from
847 * the pci bus.
848 *
849 * Input: dev -- bus device corresponding to the ctlr
850 * Output: None
851 * Return value: 0 -- success
852 * non-zero-- failure
853 */
854 static TW_INT32
855 twa_detach(device_t dev)
856 {
857 struct twa_softc *sc = device_get_softc(dev);
858 TW_INT32 error;
859
860 tw_osli_dbg_dprintf(3, sc, "entered");
861
862 error = EBUSY;
863 if (sc->open) {
864 tw_osli_printf(sc, "error = %d",
865 TW_CL_SEVERITY_ERROR_STRING,
866 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
867 0x2014,
868 "Device open",
869 error);
870 goto out;
871 }
872
873 /* Shut the controller down. */
874 if ((error = twa_shutdown(dev)))
875 goto out;
876
877 /* Free all resources associated with this controller. */
878 tw_osli_free_resources(sc);
879 error = 0;
880
881 out:
882 return(error);
883 }
884
885
886
887 /*
888 * Function name: twa_shutdown
889 * Description: Called at unload/shutdown time. Lets the controller
890 * know that we are going down.
891 *
892 * Input: dev -- bus device corresponding to the ctlr
893 * Output: None
894 * Return value: 0 -- success
895 * non-zero-- failure
896 */
897 static TW_INT32
898 twa_shutdown(device_t dev)
899 {
900 struct twa_softc *sc = device_get_softc(dev);
901 TW_INT32 error = 0;
902
903 tw_osli_dbg_dprintf(3, sc, "entered");
904
905 /* Disconnect interrupts. */
906 error = twa_teardown_intr(sc);
907
908 /* Stop watchdog task. */
909 callout_drain(&(sc->watchdog_callout[0]));
910 callout_drain(&(sc->watchdog_callout[1]));
911
912 /* Disconnect from the controller. */
913 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
914 tw_osli_printf(sc, "error = %d",
915 TW_CL_SEVERITY_ERROR_STRING,
916 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
917 0x2015,
918 "Failed to shutdown Common Layer/controller",
919 error);
920 }
921 return(error);
922 }
923
924
925
926 /*
927 * Function name: twa_busdma_lock
928 * Description: Function to provide synchronization during busdma_swi.
929 *
930 * Input: lock_arg -- lock mutex sent as argument
931 * op -- operation (lock/unlock) expected of the function
932 * Output: None
933 * Return value: None
934 */
935 TW_VOID
936 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
937 {
938 struct mtx *lock;
939
940 lock = (struct mtx *)lock_arg;
941 switch (op) {
942 case BUS_DMA_LOCK:
943 mtx_lock_spin(lock);
944 break;
945
946 case BUS_DMA_UNLOCK:
947 mtx_unlock_spin(lock);
948 break;
949
950 default:
951 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
952 }
953 }
954
955
956 /*
957 * Function name: twa_pci_intr
958 * Description: Interrupt handler. Wrapper for twa_interrupt.
959 *
960 * Input: arg -- ptr to OSL internal ctlr context
961 * Output: None
962 * Return value: None
963 */
964 static TW_VOID
965 twa_pci_intr(TW_VOID *arg)
966 {
967 struct twa_softc *sc = (struct twa_softc *)arg;
968
969 tw_osli_dbg_dprintf(10, sc, "entered");
970 tw_cl_interrupt(&(sc->ctlr_handle));
971 }
972
973
974 /*
975 * Function name: tw_osli_fw_passthru
976 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
977 *
978 * Input: sc -- ptr to OSL internal ctlr context
979 * buf -- ptr to ioctl pkt understood by CL
980 * Output: None
981 * Return value: 0 -- success
982 * non-zero-- failure
983 */
984 TW_INT32
985 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
986 {
987 struct tw_osli_req_context *req;
988 struct tw_osli_ioctl_no_data_buf *user_buf =
989 (struct tw_osli_ioctl_no_data_buf *)buf;
990 TW_TIME end_time;
991 TW_UINT32 timeout = 60;
992 TW_UINT32 data_buf_size_adjusted;
993 struct tw_cl_req_packet *req_pkt;
994 struct tw_cl_passthru_req_packet *pt_req;
995 TW_INT32 error;
996
997 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
998
999 if ((req = tw_osli_get_request(sc)) == NULL)
1000 return(EBUSY);
1001
1002 req->req_handle.osl_req_ctxt = req;
1003 req->orig_req = buf;
1004 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
1005
1006 req_pkt = &(req->req_pkt);
1007 req_pkt->status = 0;
1008 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
1009 /* Let the Common Layer retry the request on cmd queue full. */
1010 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
1011
1012 pt_req = &(req_pkt->gen_req_pkt.pt_req);
1013 /*
1014 * Make sure that the data buffer sent to firmware is a
1015 * 512 byte multiple in size.
1016 */
1017 data_buf_size_adjusted =
1018 (user_buf->driver_pkt.buffer_length +
1019 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
1020 if ((req->length = data_buf_size_adjusted)) {
1021 if ((req->data = malloc(data_buf_size_adjusted,
1022 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
1023 error = ENOMEM;
1024 tw_osli_printf(sc, "error = %d",
1025 TW_CL_SEVERITY_ERROR_STRING,
1026 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1027 0x2016,
1028 "Could not alloc mem for "
1029 "fw_passthru data_buf",
1030 error);
1031 goto fw_passthru_err;
1032 }
1033 /* Copy the payload. */
1034 if ((error = copyin((TW_VOID *)(user_buf->pdata),
1035 req->data,
1036 user_buf->driver_pkt.buffer_length)) != 0) {
1037 tw_osli_printf(sc, "error = %d",
1038 TW_CL_SEVERITY_ERROR_STRING,
1039 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1040 0x2017,
1041 "Could not copyin fw_passthru data_buf",
1042 error);
1043 goto fw_passthru_err;
1044 }
1045 pt_req->sgl_entries = 1; /* will be updated during mapping */
1046 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1047 TW_OSLI_REQ_FLAGS_DATA_OUT);
1048 } else
1049 pt_req->sgl_entries = 0; /* no payload */
1050
1051 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1052 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1053
1054 if ((error = tw_osli_map_request(req)))
1055 goto fw_passthru_err;
1056
1057 end_time = tw_osl_get_local_time() + timeout;
1058 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1059 mtx_lock(req->ioctl_wake_timeout_lock);
1060 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1061
1062 error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
1063 "twa_passthru", timeout*hz);
1064 mtx_unlock(req->ioctl_wake_timeout_lock);
1065
1066 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1067 error = 0;
1068 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1069
1070 if (! error) {
1071 if (((error = req->error_code)) ||
1072 ((error = (req->state !=
1073 TW_OSLI_REQ_STATE_COMPLETE))) ||
1074 ((error = req_pkt->status)))
1075 goto fw_passthru_err;
1076 break;
1077 }
1078
1079 if (req_pkt->status) {
1080 error = req_pkt->status;
1081 goto fw_passthru_err;
1082 }
1083
1084 if (error == EWOULDBLOCK) {
1085 /* Time out! */
1086 if ((!(req->error_code)) &&
1087 (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
1088 (!(req_pkt->status)) ) {
1089 #ifdef TW_OSL_DEBUG
1090 tw_osli_printf(sc, "request = %p",
1091 TW_CL_SEVERITY_ERROR_STRING,
1092 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1093 0x7777,
1094 "FALSE Passthru timeout!",
1095 req);
1096 #endif /* TW_OSL_DEBUG */
1097 error = 0; /* False error */
1098 break;
1099 }
1100 if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1101 #ifdef TW_OSL_DEBUG
1102 tw_osli_printf(sc, "request = %p",
1103 TW_CL_SEVERITY_ERROR_STRING,
1104 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1105 0x2018,
1106 "Passthru request timed out!",
1107 req);
1108 #else /* TW_OSL_DEBUG */
1109 device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1110 #endif /* TW_OSL_DEBUG */
1111 tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1112 }
1113
1114 error = 0;
1115 end_time = tw_osl_get_local_time() + timeout;
1116 continue;
1117 /*
1118 * Don't touch req after a reset. It (and any
1119 * associated data) will be
1120 * unmapped by the callback.
1121 */
1122 }
1123 /*
1124 * Either the request got completed, or we were woken up by a
1125 * signal. Calculate the new timeout, in case it was the latter.
1126 */
1127 timeout = (end_time - tw_osl_get_local_time());
1128 } /* End of while loop */
1129
1130 /* If there was a payload, copy it back. */
1131 if ((!error) && (req->length))
1132 if ((error = copyout(req->data, user_buf->pdata,
1133 user_buf->driver_pkt.buffer_length)))
1134 tw_osli_printf(sc, "error = %d",
1135 TW_CL_SEVERITY_ERROR_STRING,
1136 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1137 0x2019,
1138 "Could not copyout fw_passthru data_buf",
1139 error);
1140
1141 fw_passthru_err:
1142
1143 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1144 error = EBUSY;
1145
1146 user_buf->driver_pkt.os_status = error;
1147 /* Free resources. */
1148 if (req->data)
1149 free(req->data, TW_OSLI_MALLOC_CLASS);
1150 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1151 return(error);
1152 }
1153
1154
1155
1156 /*
1157 * Function name: tw_osl_complete_passthru
1158 * Description: Called to complete passthru requests.
1159 *
1160 * Input: req_handle -- ptr to request handle
1161 * Output: None
1162 * Return value: None
1163 */
/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.
 *
 * Input:		req_handle -- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	/*
	 * A request completing here should have been submitted and be
	 * sitting on the busy queue; anything else indicates a driver bug,
	 * so complain loudly (but carry on with the completion anyway).
	 */
	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	/*
	 * Undo the DMA mapping (and copy bounce-buffer data back, if one
	 * was used) before the originator can be woken and look at the data.
	 */
	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			/*
			 * Clear SLEEPING before the wakeup so the sleeper
			 * (tw_osli_fw_passthru) can distinguish a genuine
			 * completion from a timeout.
			 */
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			/* A bus reset aborts passthrus; nothing to report. */
			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			/* Completion with no sleeper and no known cause. */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		/* This completion routine is only for passthru requests. */
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}
1237
1238
1239
1240 /*
1241 * Function name: tw_osli_get_request
1242 * Description: Gets a request pkt from the free queue.
1243 *
1244 * Input: sc -- ptr to OSL internal ctlr context
1245 * Output: None
1246 * Return value: ptr to request pkt -- success
1247 * NULL -- failure
1248 */
1249 struct tw_osli_req_context *
1250 tw_osli_get_request(struct twa_softc *sc)
1251 {
1252 struct tw_osli_req_context *req;
1253
1254 tw_osli_dbg_dprintf(4, sc, "entered");
1255
1256 /* Get a free request packet. */
1257 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1258
1259 /* Initialize some fields to their defaults. */
1260 if (req) {
1261 req->req_handle.osl_req_ctxt = NULL;
1262 req->req_handle.cl_req_ctxt = NULL;
1263 req->req_handle.is_io = 0;
1264 req->data = NULL;
1265 req->length = 0;
1266 req->deadline = 0;
1267 req->real_data = NULL;
1268 req->real_length = 0;
1269 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1270 req->flags = 0;
1271 req->error_code = 0;
1272 req->orig_req = NULL;
1273
1274 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1275
1276 }
1277 return(req);
1278 }
1279
1280
1281
1282 /*
1283 * Function name: twa_map_load_data_callback
1284 * Description: Callback of bus_dmamap_load for the buffer associated
1285 * with data. Updates the cmd pkt (size/sgl_entries
1286 * fields, as applicable) to reflect the number of sg
1287 * elements.
1288 *
1289 * Input: arg -- ptr to OSL internal request context
1290 * segs -- ptr to a list of segment descriptors
1291 * nsegments--# of segments
1292 * error -- 0 if no errors encountered before callback,
1293 * non-zero if errors were encountered
1294 * Output: None
1295 * Return value: None
1296 */
/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements, and submits the request to the Common Layer.
 *
 * Input:	arg	-- ptr to OSL internal request context
 *		segs	-- ptr to a list of segment descriptors
 *		nsegments--# of segments
 *		error	-- 0 if no errors encountered before callback,
 *				non-zero if errors were encountered
 * Output:	None
 * Return value:None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/*
	 * EINVAL means the load itself was rejected; the request never got
	 * mapped, so just record the error and let the caller clean up.
	 */
	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	/* The map now exists, so the completion path must unmap it. */
	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	/*
	 * EFBIG (too many segments) arrives after the map was created, so
	 * fall through to the error path below, which fires the completion
	 * callback to unwind the mapping.
	 */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		/* Sync before the controller reads/writes the buffer. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		/*
		 * sgl_entries was pre-set by the submitter (to 1 when there
		 * is a payload); adjust it to the actual segment count.
		 */
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		/* Same sync/copy dance, but using the per-request map. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		/* Fire the completion callback to unwind the mapping. */
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1387
1388
1389
1390 /*
1391 * Function name: twa_map_load_callback
1392 * Description: Callback of bus_dmamap_load for the buffer associated
1393 * with a cmd pkt.
1394 *
1395 * Input: arg -- ptr to variable to hold phys addr
1396 * segs -- ptr to a list of segment descriptors
1397 * nsegments--# of segments
1398 * error -- 0 if no errors encountered before callback,
1399 * non-zero if errors were encountered
1400 * Output: None
1401 * Return value: None
1402 */
1403 static TW_VOID
1404 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1405 TW_INT32 nsegments, TW_INT32 error)
1406 {
1407 *((bus_addr_t *)arg) = segs[0].ds_addr;
1408 }
1409
1410
1411
1412 /*
1413 * Function name: tw_osli_map_request
1414 * Description: Maps a cmd pkt and data associated with it, into
1415 * DMA'able memory.
1416 *
1417 * Input: req -- ptr to request pkt
1418 * Output: None
1419 * Return value: 0 -- success
1420 * non-zero-- failure
1421 */
/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory, and submits it to the Common Layer.
 *
 * Input:	req	-- ptr to request pkt
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round length up to a multiple of sg_size_factor. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			/*
			 * M_NOWAIT: this can be called from the CAM I/O
			 * path, where sleeping is not an option.
			 */
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}
	
		/*
		 * Map the data buffer into bus space and build the SG list.
		 * twa_map_load_data_callback does the actual submission to
		 * the Common Layer once the segments are known.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}
		
		if (!error)
			/* Pick up any error the callback recorded. */
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * EINPROGRESS means the load was deferred and
				 * the callback will run later.
				 *
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				mtx_unlock_spin(sc->io_lock);
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/*
		 * No data payload: submit directly to the Common Layer
		 * without any DMA mapping.
		 */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			/* Fire the completion callback to unwind state. */
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1546
1547
1548
1549 /*
1550 * Function name: tw_osli_unmap_request
1551 * Description: Undoes the mapping done by tw_osli_map_request.
1552 *
1553 * Input: req -- ptr to request pkt
1554 * Output: None
1555 * Return value: None
1556 */
/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request:
 *			syncs and unloads the DMA map, copies bounce-buffer
 *			data back for reads, and frees the bounce buffer.
 *
 * Input:	req	-- ptr to request pkt
 * Output:	None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				/* Sync before the CPU reads the buffer. */
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map); 

			mtx_unlock_spin(sc->io_lock);
		} else {
			/* Non-passthru requests use the per-request map. */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
1619
1620
1621
1622 #ifdef TW_OSL_DEBUG
1623
/* Prototypes for the ddb-callable debug helpers defined below. */
TW_VOID	twa_report_stats(TW_VOID);
TW_VOID	twa_reset_stats(TW_VOID);
TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
TW_VOID	twa_print_req_info(struct tw_osli_req_context *req);
1628
1629
1630 /*
1631 * Function name: twa_report_stats
1632 * Description: For being called from ddb. Calls functions that print
1633 * OSL and CL internal stats for the controller.
1634 *
1635 * Input: None
1636 * Output: None
1637 * Return value: None
1638 */
1639 TW_VOID
1640 twa_report_stats(TW_VOID)
1641 {
1642 struct twa_softc *sc;
1643 TW_INT32 i;
1644
1645 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1646 tw_osli_print_ctlr_stats(sc);
1647 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1648 }
1649 }
1650
1651
1652
1653 /*
1654 * Function name: tw_osli_print_ctlr_stats
1655 * Description: For being called from ddb. Prints OSL controller stats
1656 *
1657 * Input: sc -- ptr to OSL internal controller context
1658 * Output: None
1659 * Return value: None
1660 */
/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller stats
 *			(current and high-water lengths of the free and busy
 *			request queues).
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d     %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d     %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
1673
1674
1675
1676 /*
1677 * Function name: twa_print_req_info
1678 * Description: For being called from ddb. Calls functions that print
1679 * OSL and CL internal details for the request.
1680 *
1681 * Input: req -- ptr to OSL internal request context
1682 * Output: None
1683 * Return value: None
1684 */
/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	/* Dump the OSL half of the request, then the Common Layer half. */
	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	tw_cl_print_req_info(&(req->req_handle));
}
1701
1702
1703
1704 /*
1705 * Function name: twa_reset_stats
1706 * Description: For being called from ddb.
1707 * Resets some OSL controller stats.
1708 *
1709 * Input: None
1710 * Output: None
1711 * Return value: None
1712 */
1713 TW_VOID
1714 twa_reset_stats(TW_VOID)
1715 {
1716 struct twa_softc *sc;
1717 TW_INT32 i;
1718
1719 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1720 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1721 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1722 tw_cl_reset_stats(&sc->ctlr_handle);
1723 }
1724 }
1725
1726 #endif /* TW_OSL_DEBUG */
Cache object: 2eb94e5601634986c12f0f37b1a71411
|