1 /*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/10.0/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $");
32
33 /*
34 * AMCC's 3ware driver for 9000 series storage controllers.
35 *
36 * Author: Vinod Kashyap
37 * Modifications by: Adam Radford
38 * Modifications by: Manjunath Ranganathaiah
39 */
40
41
42 /*
43 * FreeBSD specific functions not related to CAM, and other
44 * miscellaneous functions.
45 */
46
47
48 #include <dev/twa/tw_osl_includes.h>
49 #include <dev/twa/tw_cl_fwif.h>
50 #include <dev/twa/tw_cl_ioctl.h>
51 #include <dev/twa/tw_osl_ioctl.h>
52
53 #ifdef TW_OSL_DEBUG
54 TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
55 TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
56 #endif /* TW_OSL_DEBUG */
57
58 static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
59
60
61 static d_open_t twa_open;
62 static d_close_t twa_close;
63 static d_ioctl_t twa_ioctl;
64
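/*
 * Character-device entry points backing the /dev/twa<unit> control node
 * created in twa_attach().  Management tools open that node and issue the
 * ioctls handled in twa_ioctl() below; a minimal, hypothetical user-space
 * sketch (argument types for the ioctls are defined in tw_osl_ioctl.h):
 *
 *	int fd = open("/dev/twa0", O_RDWR);	// marks the controller open
 *	// ... ioctl(fd, TW_OSL_IOCTL_..., &arg) as appropriate ...
 *	close(fd);				// marks it closed again
 */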
65 static struct cdevsw twa_cdevsw = {
66 .d_version = D_VERSION,
67 .d_open = twa_open,
68 .d_close = twa_close,
69 .d_ioctl = twa_ioctl,
70 .d_name = "twa",
71 };
72
73 static devclass_t twa_devclass;
74
75
76 /*
77 * Function name: twa_open
78 * Description: Called when the controller is opened.
79 * Simply marks the controller as open.
80 *
81 * Input: dev -- control device corresponding to the ctlr
82 * flags -- mode of open
83 * fmt -- device type (character/block etc.)
84 * proc -- current process
85 * Output: None
86 * Return value: 0 -- success
87 * non-zero-- failure
88 */
89 static TW_INT32
90 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
91 {
92 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
93
94 tw_osli_dbg_dprintf(5, sc, "entered");
95 sc->open = TW_CL_TRUE;
96 return(0);
97 }
98
99
100
101 /*
102 * Function name: twa_close
103 * Description: Called when the controller is closed.
104 * Simply marks the controller as not open.
105 *
106 * Input: dev -- control device corresponding to the ctlr
107 * flags -- mode of corresponding open
108 * fmt -- device type (character/block etc.)
109 * proc -- current process
110 * Output: None
111 * Return value: 0 -- success
112 * non-zero-- failure
113 */
114 static TW_INT32
115 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
116 {
117 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
118
119 tw_osli_dbg_dprintf(5, sc, "entered");
120 sc->open = TW_CL_FALSE;
121 return(0);
122 }
123
124
125
126 /*
127 * Function name: twa_ioctl
128 * Description: Called when an ioctl is posted to the controller.
129 * Handles any OS Layer specific cmds, passes the rest
130 * on to the Common Layer.
131 *
132 * Input: dev -- control device corresponding to the ctlr
133 * cmd -- ioctl cmd
134 * buf -- ptr to buffer in kernel memory, which is
135 * a copy of the input buffer in user-space
136 * flags -- mode of corresponding open
137 * proc -- current process
138 * Output: buf -- ptr to buffer in kernel memory, which will
139 * be copied to the output buffer in user-space
140 * Return value: 0 -- success
141 * non-zero-- failure
142 */
143 static TW_INT32
144 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
145 {
146 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
147 TW_INT32 error;
148
149 tw_osli_dbg_dprintf(5, sc, "entered");
150
151 switch (cmd) {
152 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
153 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
154 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
155 break;
156
157 case TW_OSL_IOCTL_SCAN_BUS:
158 /* Request CAM for a bus scan. */
159 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
160 error = tw_osli_request_bus_scan(sc);
161 break;
162
163 default:
164 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
165 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
166 break;
167 }
168 return(error);
169 }
170
171
172
173 static TW_INT32 twa_probe(device_t dev);
174 static TW_INT32 twa_attach(device_t dev);
175 static TW_INT32 twa_detach(device_t dev);
176 static TW_INT32 twa_shutdown(device_t dev);
177 static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
178 static TW_VOID twa_pci_intr(TW_VOID *arg);
179 static TW_VOID twa_watchdog(TW_VOID *arg);
180 int twa_setup_intr(struct twa_softc *sc);
181 int twa_teardown_intr(struct twa_softc *sc);
182
183 static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
184 static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
185
186 static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
187 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
188 static TW_VOID twa_map_load_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190
191
192 static device_method_t twa_methods[] = {
193 /* Device interface */
194 DEVMETHOD(device_probe, twa_probe),
195 DEVMETHOD(device_attach, twa_attach),
196 DEVMETHOD(device_detach, twa_detach),
197 DEVMETHOD(device_shutdown, twa_shutdown),
198
199 DEVMETHOD_END
200 };
201
202 static driver_t twa_pci_driver = {
203 "twa",
204 twa_methods,
205 sizeof(struct twa_softc)
206 };
207
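/*
 * Newbus/module glue: register the driver on the PCI bus and record
 * run-time dependencies on the cam and pci modules.
 */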
208 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
209 MODULE_DEPEND(twa, cam, 1, 1, 1);
210 MODULE_DEPEND(twa, pci, 1, 1, 1);
211
212
213 /*
214 * Function name: twa_probe
215 * Description: Called at driver load time. Claims 9000 ctlrs.
216 *
217 * Input: dev -- bus device corresponding to the ctlr
218 * Output: None
219 * Return value: <= 0 -- success
220 * > 0 -- failure
221 */
222 static TW_INT32
223 twa_probe(device_t dev)
224 {
225 static TW_UINT8 first_ctlr = 1;
226
227 tw_osli_dbg_printf(3, "entered");
228
229 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
230 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
231 /* Print the driver version only once. */
232 if (first_ctlr) {
233 printf("3ware device driver for 9000 series storage "
234 "controllers, version: %s\n",
235 TW_OSL_DRIVER_VERSION_STRING);
236 first_ctlr = 0;
237 }
238 return(0);
239 }
240 return(ENXIO);
241 }
242
243 int twa_setup_intr(struct twa_softc *sc)
244 {
245 int error = 0;
246
247 if (!(sc->intr_handle) && (sc->irq_res)) {
248 error = bus_setup_intr(sc->bus_dev, sc->irq_res,
249 INTR_TYPE_CAM | INTR_MPSAFE,
250 NULL, twa_pci_intr,
251 sc, &sc->intr_handle);
252 }
253 return( error );
254 }
255
256
257 int twa_teardown_intr(struct twa_softc *sc)
258 {
259 int error = 0;
260
261 if ((sc->intr_handle) && (sc->irq_res)) {
262 error = bus_teardown_intr(sc->bus_dev,
263 sc->irq_res, sc->intr_handle);
264 sc->intr_handle = NULL;
265 }
266 return( error );
267 }
268
269
270
271 /*
272 * Function name: twa_attach
273 * Description: Allocates pci resources; updates sc; adds a node to the
274 * sysctl tree to expose the driver version; makes calls
275 * (to the Common Layer) to initialize ctlr, and to
276 * attach to CAM.
277 *
278 * Input: dev -- bus device corresponding to the ctlr
279 * Output: None
280 * Return value: 0 -- success
281 * non-zero-- failure
282 */
283 static TW_INT32
284 twa_attach(device_t dev)
285 {
286 struct twa_softc *sc = device_get_softc(dev);
287 TW_INT32 bar_num;
288 TW_INT32 bar0_offset;
289 TW_INT32 bar_size;
290 TW_INT32 error;
291
292 tw_osli_dbg_dprintf(3, sc, "entered");
293
294 sc->ctlr_handle.osl_ctlr_ctxt = sc;
295
296 /* Initialize the softc structure. */
297 sc->bus_dev = dev;
298 sc->device_id = pci_get_device(dev);
299
300 /* Initialize the mutexes right here. */
301 sc->io_lock = &(sc->io_lock_handle);
302 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
303 sc->q_lock = &(sc->q_lock_handle);
304 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
305 sc->sim_lock = &(sc->sim_lock_handle);
306 mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
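/*
 * io_lock and q_lock are spin mutexes (io_lock also serves as the busdma
 * lockfuncarg -- see twa_busdma_lock()); sim_lock is a regular, recursable
 * mutex, apparently used as the SIM lock by the CAM attachment code.
 */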
307
308 sysctl_ctx_init(&sc->sysctl_ctxt);
309 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
310 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
311 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
312 if (sc->sysctl_tree == NULL) {
313 tw_osli_printf(sc, "error = %d",
314 TW_CL_SEVERITY_ERROR_STRING,
315 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
316 0x2000,
317 "Cannot add sysctl tree node",
318 ENXIO);
319 return(ENXIO);
320 }
321 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
322 OID_AUTO, "driver_version", CTLFLAG_RD,
323 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
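/*
 * The version string is thus readable from user space as, e.g., the
 * hw.twa0.driver_version sysctl.
 */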
324
325 /* Force the busmaster enable bit on, in case the BIOS forgot. */
326 pci_enable_busmaster(dev);
327
328 /* Allocate the PCI register window. */
329 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
330 &bar_num, &bar0_offset, &bar_size))) {
331 tw_osli_printf(sc, "error = %d",
332 TW_CL_SEVERITY_ERROR_STRING,
333 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
334 0x201F,
335 "Can't get PCI BAR info",
336 error);
337 tw_osli_free_resources(sc);
338 return(error);
339 }
340 sc->reg_res_id = PCIR_BARS + bar0_offset;
341 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
342 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
343 == NULL) {
344 tw_osli_printf(sc, "error = %d",
345 TW_CL_SEVERITY_ERROR_STRING,
346 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
347 0x2002,
348 "Can't allocate register window",
349 ENXIO);
350 tw_osli_free_resources(sc);
351 return(ENXIO);
352 }
353 sc->bus_tag = rman_get_bustag(sc->reg_res);
354 sc->bus_handle = rman_get_bushandle(sc->reg_res);
355
356 /* Allocate and register our interrupt. */
357 sc->irq_res_id = 0;
358 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
359 &(sc->irq_res_id), 0, ~0, 1,
360 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
361 tw_osli_printf(sc, "error = %d",
362 TW_CL_SEVERITY_ERROR_STRING,
363 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
364 0x2003,
365 "Can't allocate interrupt",
366 ENXIO);
367 tw_osli_free_resources(sc);
368 return(ENXIO);
369 }
370 if ((error = twa_setup_intr(sc))) {
371 tw_osli_printf(sc, "error = %d",
372 TW_CL_SEVERITY_ERROR_STRING,
373 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
374 0x2004,
375 "Can't set up interrupt",
376 error);
377 tw_osli_free_resources(sc);
378 return(error);
379 }
380
381 if ((error = tw_osli_alloc_mem(sc))) {
382 tw_osli_printf(sc, "error = %d",
383 TW_CL_SEVERITY_ERROR_STRING,
384 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
385 0x2005,
386 "Memory allocation failure",
387 error);
388 tw_osli_free_resources(sc);
389 return(error);
390 }
391
392 /* Initialize the Common Layer for this controller. */
393 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
394 TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
395 sc->non_dma_mem, sc->dma_mem,
396 sc->dma_mem_phys
397 ))) {
398 tw_osli_printf(sc, "error = %d",
399 TW_CL_SEVERITY_ERROR_STRING,
400 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
401 0x2006,
402 "Failed to initialize Common Layer/controller",
403 error);
404 tw_osli_free_resources(sc);
405 return(error);
406 }
407
408 /* Create the control device. */
409 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
410 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
411 "twa%d", device_get_unit(sc->bus_dev));
412 sc->ctrl_dev->si_drv1 = sc;
413
414 if ((error = tw_osli_cam_attach(sc))) {
415 tw_osli_free_resources(sc);
416 tw_osli_printf(sc, "error = %d",
417 TW_CL_SEVERITY_ERROR_STRING,
418 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
419 0x2007,
420 "Failed to initialize CAM",
421 error);
422 return(error);
423 }
424
425 sc->watchdog_index = 0;
426 callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
427 callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
428 callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
429
430 return(0);
431 }
432
433
434 static TW_VOID
435 twa_watchdog(TW_VOID *arg)
436 {
437 struct tw_cl_ctlr_handle *ctlr_handle =
438 (struct tw_cl_ctlr_handle *)arg;
439 struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
440 int i;
441 int i_need_a_reset = 0;
442 int driver_is_active = 0;
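/*
 * The 1234 below is apparently just a sentinel meaning "callout_reset()
 * was never called"; callout_reset() itself returns only 0 or 1.
 */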
443 int my_watchdog_was_pending = 1234;
444 TW_UINT64 current_time;
445 struct tw_osli_req_context *my_req;
446
447
448 //==============================================================================
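	/*
	 * Scan the outstanding requests for one whose deadline has expired,
	 * and flag the controller for a reset if such a request is found.
	 */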
449 current_time = (TW_UINT64) (tw_osl_get_local_time());
450
451 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
452 my_req = &(sc->req_ctx_buf[i]);
453
454 if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
455 (my_req->deadline) &&
456 (my_req->deadline < current_time)) {
457 tw_cl_set_reset_needed(ctlr_handle);
458 #ifdef TW_OSL_DEBUG
459 device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
460 #else /* TW_OSL_DEBUG */
461 device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
462 #endif /* TW_OSL_DEBUG */
463 break;
464 }
465 }
466 //==============================================================================
467
468 i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);
469
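	/*
	 * Alternate between the two watchdog callout slots on each run,
	 * presumably so that rescheduling from within the handler never
	 * collides with the previous instance of the callout.
	 */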
470 i = (int) ((sc->watchdog_index++) & 1);
471
472 driver_is_active = tw_cl_is_active(ctlr_handle);
473
474 if (i_need_a_reset) {
475 #ifdef TW_OSL_DEBUG
476 device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
477 #endif /* TW_OSL_DEBUG */
478 my_watchdog_was_pending =
479 callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
480 tw_cl_reset_ctlr(ctlr_handle);
481 #ifdef TW_OSL_DEBUG
482 device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
483 #endif /* TW_OSL_DEBUG */
484 } else if (driver_is_active) {
485 my_watchdog_was_pending =
486 callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
487 }
488 #ifdef TW_OSL_DEBUG
489 if (i_need_a_reset || my_watchdog_was_pending)
490 device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
491 "driver_is_active = %d, my_watchdog_was_pending = %d\n",
492 i_need_a_reset, driver_is_active, my_watchdog_was_pending);
493 #endif /* TW_OSL_DEBUG */
494 }
495
496
497 /*
498 * Function name: tw_osli_alloc_mem
499 * Description: Allocates memory needed both by CL and OSL.
500 *
501 * Input: sc -- OSL internal controller context
502 * Output: None
503 * Return value: 0 -- success
504 * non-zero-- failure
505 */
506 static TW_INT32
507 tw_osli_alloc_mem(struct twa_softc *sc)
508 {
509 struct tw_osli_req_context *req;
510 TW_UINT32 max_sg_elements;
511 TW_UINT32 non_dma_mem_size;
512 TW_UINT32 dma_mem_size;
513 TW_INT32 error;
514 TW_INT32 i;
515
516 tw_osli_dbg_dprintf(3, sc, "entered");
517
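	/*
	 * Tell the Common Layer whether 64-bit DMA addresses and segment
	 * lengths will be used, based on the width of the platform's busdma
	 * types; the same test selects the per-request scatter/gather limit.
	 */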
518 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
519 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
520
521 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
522 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
523
524 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
525 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
526 &(sc->alignment), &(sc->sg_size_factor),
527 &non_dma_mem_size, &dma_mem_size
528 ))) {
529 tw_osli_printf(sc, "error = %d",
530 TW_CL_SEVERITY_ERROR_STRING,
531 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
532 0x2008,
533 "Can't get Common Layer's memory requirements",
534 error);
535 return(error);
536 }
537
538 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
539 M_WAITOK)) == NULL) {
540 tw_osli_printf(sc, "error = %d",
541 TW_CL_SEVERITY_ERROR_STRING,
542 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
543 0x2009,
544 "Can't allocate non-dma memory",
545 ENOMEM);
546 return(ENOMEM);
547 }
548
549 /* Create the parent dma tag. */
550 if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
551 sc->alignment, /* alignment */
552 0, /* boundary */
553 BUS_SPACE_MAXADDR, /* lowaddr */
554 BUS_SPACE_MAXADDR, /* highaddr */
555 NULL, NULL, /* filter, filterarg */
556 TW_CL_MAX_IO_SIZE, /* maxsize */
557 max_sg_elements, /* nsegments */
558 TW_CL_MAX_IO_SIZE, /* maxsegsize */
559 0, /* flags */
560 NULL, /* lockfunc */
561 NULL, /* lockfuncarg */
562 &sc->parent_tag /* tag */)) {
563 tw_osli_printf(sc, "error = %d",
564 TW_CL_SEVERITY_ERROR_STRING,
565 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
566 0x200A,
567 "Can't allocate parent DMA tag",
568 ENOMEM);
569 return(ENOMEM);
570 }
571
572 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
573 if (bus_dma_tag_create(sc->parent_tag, /* parent */
574 sc->alignment, /* alignment */
575 0, /* boundary */
576 BUS_SPACE_MAXADDR, /* lowaddr */
577 BUS_SPACE_MAXADDR, /* highaddr */
578 NULL, NULL, /* filter, filterarg */
579 dma_mem_size, /* maxsize */
580 1, /* nsegments */
581 BUS_SPACE_MAXSIZE, /* maxsegsize */
582 0, /* flags */
583 NULL, /* lockfunc */
584 NULL, /* lockfuncarg */
585 &sc->cmd_tag /* tag */)) {
586 tw_osli_printf(sc, "error = %d",
587 TW_CL_SEVERITY_ERROR_STRING,
588 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
589 0x200B,
590 "Can't allocate DMA tag for Common Layer's "
591 "DMA'able memory",
592 ENOMEM);
593 return(ENOMEM);
594 }
595
596 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
597 BUS_DMA_NOWAIT, &sc->cmd_map)) {
598 /* Try a second time. */
599 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
600 BUS_DMA_NOWAIT, &sc->cmd_map)) {
601 tw_osli_printf(sc, "error = %d",
602 TW_CL_SEVERITY_ERROR_STRING,
603 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
604 0x200C,
605 "Can't allocate DMA'able memory for the"
606 "Common Layer",
607 ENOMEM);
608 return(ENOMEM);
609 }
610 }
611
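	/*
	 * Load the command-area allocation; since cmd_tag allows only a
	 * single segment, twa_map_load_callback() simply records that
	 * segment's bus address in dma_mem_phys for tw_cl_init_ctlr() to use.
	 */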
612 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
613 dma_mem_size, twa_map_load_callback,
614 &sc->dma_mem_phys, 0);
615
616 /*
617 * Create a dma tag for data buffers; size will be the maximum
618 * possible I/O size (128kB).
619 */
620 if (bus_dma_tag_create(sc->parent_tag, /* parent */
621 sc->alignment, /* alignment */
622 0, /* boundary */
623 BUS_SPACE_MAXADDR, /* lowaddr */
624 BUS_SPACE_MAXADDR, /* highaddr */
625 NULL, NULL, /* filter, filterarg */
626 TW_CL_MAX_IO_SIZE, /* maxsize */
627 max_sg_elements, /* nsegments */
628 TW_CL_MAX_IO_SIZE, /* maxsegsize */
629 BUS_DMA_ALLOCNOW, /* flags */
630 twa_busdma_lock, /* lockfunc */
631 sc->io_lock, /* lockfuncarg */
632 &sc->dma_tag /* tag */)) {
633 tw_osli_printf(sc, "error = %d",
634 TW_CL_SEVERITY_ERROR_STRING,
635 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
636 0x200F,
637 "Can't allocate DMA tag for data buffers",
638 ENOMEM);
639 return(ENOMEM);
640 }
641
642 /*
643 * Create a dma tag for ioctl data buffers; size will be the maximum
644 * possible I/O size (128kB).
645 */
646 if (bus_dma_tag_create(sc->parent_tag, /* parent */
647 sc->alignment, /* alignment */
648 0, /* boundary */
649 BUS_SPACE_MAXADDR, /* lowaddr */
650 BUS_SPACE_MAXADDR, /* highaddr */
651 NULL, NULL, /* filter, filterarg */
652 TW_CL_MAX_IO_SIZE, /* maxsize */
653 max_sg_elements, /* nsegments */
654 TW_CL_MAX_IO_SIZE, /* maxsegsize */
655 BUS_DMA_ALLOCNOW, /* flags */
656 twa_busdma_lock, /* lockfunc */
657 sc->io_lock, /* lockfuncarg */
658 &sc->ioctl_tag /* tag */)) {
659 tw_osli_printf(sc, "error = %d",
660 TW_CL_SEVERITY_ERROR_STRING,
661 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
662 0x2010,
663 "Can't allocate DMA tag for ioctl data buffers",
664 ENOMEM);
665 return(ENOMEM);
666 }
667
668 /* Create just one map for all ioctl request data buffers. */
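	/*
	 * (Passthru data buffers are expected to be mapped one at a time;
	 * the load/unload of this single map is guarded by io_lock in
	 * tw_osli_map_request() and tw_osli_unmap_request().)
	 */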
669 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
670 tw_osli_printf(sc, "error = %d",
671 TW_CL_SEVERITY_ERROR_STRING,
672 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
673 0x2011,
674 "Can't create ioctl map",
675 ENOMEM);
676 return(ENOMEM);
677 }
678
679
680 /* Initialize request queues. */
681 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
682 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
683
684 if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
685 malloc((sizeof(struct tw_osli_req_context) *
686 TW_OSLI_MAX_NUM_REQUESTS),
687 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
688 tw_osli_printf(sc, "error = %d",
689 TW_CL_SEVERITY_ERROR_STRING,
690 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
691 0x2012,
692 "Failed to allocate request packets",
693 ENOMEM);
694 return(ENOMEM);
695 }
696 bzero(sc->req_ctx_buf,
697 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);
698
699 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
700 req = &(sc->req_ctx_buf[i]);
701 req->ctlr = sc;
702 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
703 tw_osli_printf(sc, "request # = %d, error = %d",
704 TW_CL_SEVERITY_ERROR_STRING,
705 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
706 0x2013,
707 "Can't create dma map",
708 i, ENOMEM);
709 return(ENOMEM);
710 }
711
712 /* Initialize the ioctl wakeup/timeout mutex. */
713 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
714 mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);
715
716 /* Insert request into the free queue. */
717 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
718 }
719
720 return(0);
721 }
722
723
724
725 /*
726 * Function name: tw_osli_free_resources
727 * Description: Performs clean-up at the time of going down.
728 *
729 * Input: sc -- ptr to OSL internal ctlr context
730 * Output: None
731 * Return value: None
732 */
733 static TW_VOID
734 tw_osli_free_resources(struct twa_softc *sc)
735 {
736 struct tw_osli_req_context *req;
737 TW_INT32 error = 0;
738
739 tw_osli_dbg_dprintf(3, sc, "entered");
740
741 /* Detach from CAM */
742 tw_osli_cam_detach(sc);
743
744 if (sc->req_ctx_buf)
745 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
746 NULL) {
747 mtx_destroy(req->ioctl_wake_timeout_lock);
748
749 if ((error = bus_dmamap_destroy(sc->dma_tag,
750 req->dma_map)))
751 tw_osli_dbg_dprintf(1, sc,
752 "dmamap_destroy(dma) returned %d",
753 error);
754 }
755
756 if ((sc->ioctl_tag) && (sc->ioctl_map))
757 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
758 tw_osli_dbg_dprintf(1, sc,
759 "dmamap_destroy(ioctl) returned %d", error);
760
761 /* Free all memory allocated so far. */
762 if (sc->req_ctx_buf)
763 free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);
764
765 if (sc->non_dma_mem)
766 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
767
768 if (sc->dma_mem) {
769 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
770 bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
771 sc->cmd_map);
772 }
773 if (sc->cmd_tag)
774 if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
775 tw_osli_dbg_dprintf(1, sc,
776 "dma_tag_destroy(cmd) returned %d", error);
777
778 if (sc->dma_tag)
779 if ((error = bus_dma_tag_destroy(sc->dma_tag)))
780 tw_osli_dbg_dprintf(1, sc,
781 "dma_tag_destroy(dma) returned %d", error);
782
783 if (sc->ioctl_tag)
784 if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
785 tw_osli_dbg_dprintf(1, sc,
786 "dma_tag_destroy(ioctl) returned %d", error);
787
788 if (sc->parent_tag)
789 if ((error = bus_dma_tag_destroy(sc->parent_tag)))
790 tw_osli_dbg_dprintf(1, sc,
791 "dma_tag_destroy(parent) returned %d", error);
792
793
794 /* Disconnect the interrupt handler. */
795 if ((error = twa_teardown_intr(sc)))
796 tw_osli_dbg_dprintf(1, sc,
797 "teardown_intr returned %d", error);
798
799 if (sc->irq_res != NULL)
800 if ((error = bus_release_resource(sc->bus_dev,
801 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
802 tw_osli_dbg_dprintf(1, sc,
803 "release_resource(irq) returned %d", error);
804
805
806 /* Release the register window mapping. */
807 if (sc->reg_res != NULL)
808 if ((error = bus_release_resource(sc->bus_dev,
809 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
810 tw_osli_dbg_dprintf(1, sc,
811 "release_resource(io) returned %d", error);
812
813
814 /* Destroy the control device. */
815 if (sc->ctrl_dev != (struct cdev *)NULL)
816 destroy_dev(sc->ctrl_dev);
817
818 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
819 tw_osli_dbg_dprintf(1, sc,
820 "sysctl_ctx_free returned %d", error);
821
822 }
823
824
825
826 /*
827 * Function name: twa_detach
828 * Description: Called when the controller is being detached from
829 * the pci bus.
830 *
831 * Input: dev -- bus device corresponding to the ctlr
832 * Output: None
833 * Return value: 0 -- success
834 * non-zero-- failure
835 */
836 static TW_INT32
837 twa_detach(device_t dev)
838 {
839 struct twa_softc *sc = device_get_softc(dev);
840 TW_INT32 error;
841
842 tw_osli_dbg_dprintf(3, sc, "entered");
843
844 error = EBUSY;
845 if (sc->open) {
846 tw_osli_printf(sc, "error = %d",
847 TW_CL_SEVERITY_ERROR_STRING,
848 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
849 0x2014,
850 "Device open",
851 error);
852 goto out;
853 }
854
855 /* Shut the controller down. */
856 if ((error = twa_shutdown(dev)))
857 goto out;
858
859 /* Free all resources associated with this controller. */
860 tw_osli_free_resources(sc);
861 error = 0;
862
863 out:
864 return(error);
865 }
866
867
868
869 /*
870 * Function name: twa_shutdown
871 * Description: Called at unload/shutdown time. Lets the controller
872 * know that we are going down.
873 *
874 * Input: dev -- bus device corresponding to the ctlr
875 * Output: None
876 * Return value: 0 -- success
877 * non-zero-- failure
878 */
879 static TW_INT32
880 twa_shutdown(device_t dev)
881 {
882 struct twa_softc *sc = device_get_softc(dev);
883 TW_INT32 error = 0;
884
885 tw_osli_dbg_dprintf(3, sc, "entered");
886
887 /* Disconnect interrupts. */
888 error = twa_teardown_intr(sc);
889
890 /* Stop watchdog task. */
891 callout_drain(&(sc->watchdog_callout[0]));
892 callout_drain(&(sc->watchdog_callout[1]));
893
894 /* Disconnect from the controller. */
895 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
896 tw_osli_printf(sc, "error = %d",
897 TW_CL_SEVERITY_ERROR_STRING,
898 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
899 0x2015,
900 "Failed to shutdown Common Layer/controller",
901 error);
902 }
903 return(error);
904 }
905
906
907
908 /*
909 * Function name: twa_busdma_lock
910 * Description: Function to provide synchronization during busdma_swi.
911 *
912 * Input: lock_arg -- lock mutex sent as argument
913 * op -- operation (lock/unlock) expected of the function
914 * Output: None
915 * Return value: None
916 */
917 static TW_VOID
918 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
919 {
920 struct mtx *lock;
921
922 lock = (struct mtx *)lock_arg;
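	/* The lock passed as lockfuncarg (sc->io_lock) is a spin mutex. */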
923 switch (op) {
924 case BUS_DMA_LOCK:
925 mtx_lock_spin(lock);
926 break;
927
928 case BUS_DMA_UNLOCK:
929 mtx_unlock_spin(lock);
930 break;
931
932 default:
933 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
934 }
935 }
936
937
938 /*
939 * Function name: twa_pci_intr
940 * Description: Interrupt handler. Wrapper for twa_interrupt.
941 *
942 * Input: arg -- ptr to OSL internal ctlr context
943 * Output: None
944 * Return value: None
945 */
946 static TW_VOID
947 twa_pci_intr(TW_VOID *arg)
948 {
949 struct twa_softc *sc = (struct twa_softc *)arg;
950
951 tw_osli_dbg_dprintf(10, sc, "entered");
952 tw_cl_interrupt(&(sc->ctlr_handle));
953 }
954
955
956 /*
957 * Function name: tw_osli_fw_passthru
958 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
959 *
960 * Input: sc -- ptr to OSL internal ctlr context
961 * buf -- ptr to ioctl pkt understood by CL
962 * Output: None
963 * Return value: 0 -- success
964 * non-zero-- failure
965 */
966 TW_INT32
967 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
968 {
969 struct tw_osli_req_context *req;
970 struct tw_osli_ioctl_no_data_buf *user_buf =
971 (struct tw_osli_ioctl_no_data_buf *)buf;
972 TW_TIME end_time;
973 TW_UINT32 timeout = 60;
974 TW_UINT32 data_buf_size_adjusted;
975 struct tw_cl_req_packet *req_pkt;
976 struct tw_cl_passthru_req_packet *pt_req;
977 TW_INT32 error;
978
979 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
980
981 if ((req = tw_osli_get_request(sc)) == NULL)
982 return(EBUSY);
983
984 req->req_handle.osl_req_ctxt = req;
985 req->orig_req = buf;
986 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
987
988 req_pkt = &(req->req_pkt);
989 req_pkt->status = 0;
990 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
991 /* Let the Common Layer retry the request on cmd queue full. */
992 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
993
994 pt_req = &(req_pkt->gen_req_pkt.pt_req);
995 /*
996 * Make sure that the data buffer sent to firmware is a
997 * 512 byte multiple in size.
998 */
999 data_buf_size_adjusted =
1000 (user_buf->driver_pkt.buffer_length +
1001 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
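	/*
	 * For example, with the 512-byte sg_size_factor mentioned above, a
	 * 1000-byte user buffer is padded to 1024 bytes.
	 */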
1002 if ((req->length = data_buf_size_adjusted)) {
1003 if ((req->data = malloc(data_buf_size_adjusted,
1004 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
1005 error = ENOMEM;
1006 tw_osli_printf(sc, "error = %d",
1007 TW_CL_SEVERITY_ERROR_STRING,
1008 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1009 0x2016,
1010 "Could not alloc mem for "
1011 "fw_passthru data_buf",
1012 error);
1013 goto fw_passthru_err;
1014 }
1015 /* Copy the payload. */
1016 if ((error = copyin((TW_VOID *)(user_buf->pdata),
1017 req->data,
1018 user_buf->driver_pkt.buffer_length)) != 0) {
1019 tw_osli_printf(sc, "error = %d",
1020 TW_CL_SEVERITY_ERROR_STRING,
1021 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1022 0x2017,
1023 "Could not copyin fw_passthru data_buf",
1024 error);
1025 goto fw_passthru_err;
1026 }
1027 pt_req->sgl_entries = 1; /* will be updated during mapping */
1028 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1029 TW_OSLI_REQ_FLAGS_DATA_OUT);
1030 } else
1031 pt_req->sgl_entries = 0; /* no payload */
1032
1033 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1034 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1035
1036 if ((error = tw_osli_map_request(req)))
1037 goto fw_passthru_err;
1038
1039 end_time = tw_osl_get_local_time() + timeout;
1040 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1041 mtx_lock(req->ioctl_wake_timeout_lock);
1042 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1043
1044 error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
1045 "twa_passthru", timeout*hz);
1046 mtx_unlock(req->ioctl_wake_timeout_lock);
1047
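		/*
		 * If the completion callback already cleared the SLEEPING
		 * flag (it does so just before waking us -- see
		 * tw_osl_complete_passthru()), the request finished, so
		 * ignore whatever mtx_sleep() returned.
		 */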
1048 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1049 error = 0;
1050 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1051
1052 if (! error) {
1053 if (((error = req->error_code)) ||
1054 ((error = (req->state !=
1055 TW_OSLI_REQ_STATE_COMPLETE))) ||
1056 ((error = req_pkt->status)))
1057 goto fw_passthru_err;
1058 break;
1059 }
1060
1061 if (req_pkt->status) {
1062 error = req_pkt->status;
1063 goto fw_passthru_err;
1064 }
1065
1066 if (error == EWOULDBLOCK) {
1067 /* Time out! */
1068 if ((!(req->error_code)) &&
1069 (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
1070 (!(req_pkt->status)) ) {
1071 #ifdef TW_OSL_DEBUG
1072 tw_osli_printf(sc, "request = %p",
1073 TW_CL_SEVERITY_ERROR_STRING,
1074 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1075 0x7777,
1076 "FALSE Passthru timeout!",
1077 req);
1078 #endif /* TW_OSL_DEBUG */
1079 error = 0; /* False error */
1080 break;
1081 }
1082 if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1083 #ifdef TW_OSL_DEBUG
1084 tw_osli_printf(sc, "request = %p",
1085 TW_CL_SEVERITY_ERROR_STRING,
1086 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1087 0x2018,
1088 "Passthru request timed out!",
1089 req);
1090 #else /* TW_OSL_DEBUG */
1091 device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1092 #endif /* TW_OSL_DEBUG */
1093 tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1094 }
1095
1096 /*
1097 * Don't touch req after a reset.  It (and any
1098 * associated data) will be unmapped by the
1099 * callback.
1100 */
1101 error = 0;
1102 end_time = tw_osl_get_local_time() + timeout;
1103 continue;
1104 }
1105 /*
1106 * Either the request got completed, or we were woken up by a
1107 * signal. Calculate the new timeout, in case it was the latter.
1108 */
1109 timeout = (end_time - tw_osl_get_local_time());
1110 } /* End of while loop */
1111
1112 /* If there was a payload, copy it back. */
1113 if ((!error) && (req->length))
1114 if ((error = copyout(req->data, user_buf->pdata,
1115 user_buf->driver_pkt.buffer_length)))
1116 tw_osli_printf(sc, "error = %d",
1117 TW_CL_SEVERITY_ERROR_STRING,
1118 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1119 0x2019,
1120 "Could not copyout fw_passthru data_buf",
1121 error);
1122
1123 fw_passthru_err:
1124
1125 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1126 error = EBUSY;
1127
1128 user_buf->driver_pkt.os_status = error;
1129 /* Free resources. */
1130 if (req->data)
1131 free(req->data, TW_OSLI_MALLOC_CLASS);
1132 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1133 return(error);
1134 }
1135
1136
1137
1138 /*
1139 * Function name: tw_osl_complete_passthru
1140 * Description: Called to complete passthru requests.
1141 *
1142 * Input: req_handle -- ptr to request handle
1143 * Output: None
1144 * Return value: None
1145 */
1146 TW_VOID
1147 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1148 {
1149 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1150 struct tw_cl_req_packet *req_pkt =
1151 (struct tw_cl_req_packet *)(&req->req_pkt);
1152 struct twa_softc *sc = req->ctlr;
1153
1154 tw_osli_dbg_dprintf(5, sc, "entered");
1155
1156 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1157 tw_osli_printf(sc, "request = %p, status = %d",
1158 TW_CL_SEVERITY_ERROR_STRING,
1159 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1160 0x201B,
1161 "Unposted command completed!!",
1162 req, req->state);
1163 }
1164
1165 /*
1166 * Remove request from the busy queue. Just mark it complete.
1167 * There's no need to move it into the complete queue as we are
1168 * going to be done with it right now.
1169 */
1170 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1171 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1172
1173 tw_osli_unmap_request(req);
1174
1175 /*
1176 * Don't do a wake up if there was an error even before the request
1177 * was sent down to the Common Layer, and we hadn't gotten an
1178 * EINPROGRESS. The request originator will then be returned an
1179 * error, and he can do the clean-up.
1180 */
1181 if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1182 return;
1183
1184 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1185 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1186 /* Wake up the sleeping command originator. */
1187 tw_osli_dbg_dprintf(5, sc,
1188 "Waking up originator of request %p", req);
1189 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1190 wakeup_one(req);
1191 } else {
1192 /*
1193 * If the request completed even before mtx_sleep
1194 * was called, simply return.
1195 */
1196 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1197 return;
1198
1199 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1200 return;
1201
1202 tw_osli_printf(sc, "request = %p",
1203 TW_CL_SEVERITY_ERROR_STRING,
1204 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1205 0x201C,
1206 "Passthru callback called, "
1207 "and caller not sleeping",
1208 req);
1209 }
1210 } else {
1211 tw_osli_printf(sc, "request = %p",
1212 TW_CL_SEVERITY_ERROR_STRING,
1213 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1214 0x201D,
1215 "Passthru callback called for non-passthru request",
1216 req);
1217 }
1218 }
1219
1220
1221
1222 /*
1223 * Function name: tw_osli_get_request
1224 * Description: Gets a request pkt from the free queue.
1225 *
1226 * Input: sc -- ptr to OSL internal ctlr context
1227 * Output: None
1228 * Return value: ptr to request pkt -- success
1229 * NULL -- failure
1230 */
1231 struct tw_osli_req_context *
1232 tw_osli_get_request(struct twa_softc *sc)
1233 {
1234 struct tw_osli_req_context *req;
1235
1236 tw_osli_dbg_dprintf(4, sc, "entered");
1237
1238 /* Get a free request packet. */
1239 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1240
1241 /* Initialize some fields to their defaults. */
1242 if (req) {
1243 req->req_handle.osl_req_ctxt = NULL;
1244 req->req_handle.cl_req_ctxt = NULL;
1245 req->req_handle.is_io = 0;
1246 req->data = NULL;
1247 req->length = 0;
1248 req->deadline = 0;
1249 req->real_data = NULL;
1250 req->real_length = 0;
1251 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1252 req->flags = 0;
1253 req->error_code = 0;
1254 req->orig_req = NULL;
1255
1256 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1257
1258 }
1259 return(req);
1260 }
1261
1262
1263
1264 /*
1265 * Function name: twa_map_load_data_callback
1266 * Description: Callback of bus_dmamap_load for the buffer associated
1267 * with data. Updates the cmd pkt (size/sgl_entries
1268 * fields, as applicable) to reflect the number of sg
1269 * elements.
1270 *
1271 * Input: arg -- ptr to OSL internal request context
1272 * segs -- ptr to a list of segment descriptors
1273 * nsegments--# of segments
1274 * error -- 0 if no errors encountered before callback,
1275 * non-zero if errors were encountered
1276 * Output: None
1277 * Return value: None
1278 */
1279 static TW_VOID
1280 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1281 TW_INT32 nsegments, TW_INT32 error)
1282 {
1283 struct tw_osli_req_context *req =
1284 (struct tw_osli_req_context *)arg;
1285 struct twa_softc *sc = req->ctlr;
1286 struct tw_cl_req_packet *req_pkt = &(req->req_pkt);
1287
1288 tw_osli_dbg_dprintf(10, sc, "entered");
1289
1290 if (error == EINVAL) {
1291 req->error_code = error;
1292 return;
1293 }
1294
1295 /* Mark the request as currently being processed. */
1296 req->state = TW_OSLI_REQ_STATE_BUSY;
1297 /* Move the request into the busy queue. */
1298 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1299
1300 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1301
1302 if (error == EFBIG) {
1303 req->error_code = error;
1304 goto out;
1305 }
1306
1307 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1308 struct tw_cl_passthru_req_packet *pt_req;
1309
1310 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1311 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1312 BUS_DMASYNC_PREREAD);
1313
1314 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1315 /*
1316 * If we're using an alignment buffer, and we're
1317 * writing data, copy the real data out.
1318 */
1319 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1320 bcopy(req->real_data, req->data, req->real_length);
1321 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1322 BUS_DMASYNC_PREWRITE);
1323 }
1324
1325 pt_req = &(req_pkt->gen_req_pkt.pt_req);
1326 pt_req->sg_list = (TW_UINT8 *)segs;
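		/*
		 * sgl_entries was preset to 1 in tw_osli_fw_passthru();
		 * adjust it to the actual number of segments from busdma.
		 */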
1327 pt_req->sgl_entries += (nsegments - 1);
1328 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1329 &(req->req_handle));
1330 } else {
1331 struct tw_cl_scsi_req_packet *scsi_req;
1332
1333 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1334 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1335 BUS_DMASYNC_PREREAD);
1336
1337 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1338 /*
1339 * If we're using an alignment buffer, and we're
1340 * writing data, copy the real data out.
1341 */
1342 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1343 bcopy(req->real_data, req->data, req->real_length);
1344 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1345 BUS_DMASYNC_PREWRITE);
1346 }
1347
1348 scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1349 scsi_req->sg_list = (TW_UINT8 *)segs;
1350 scsi_req->sgl_entries += (nsegments - 1);
1351 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1352 &(req->req_handle));
1353 }
1354
1355 out:
1356 if (error) {
1357 req->error_code = error;
1358 req_pkt->tw_osl_callback(&(req->req_handle));
1359 /*
1360 * If the caller had been returned EINPROGRESS, and he has
1361 * registered a callback for handling completion, the callback
1362 * will never get called because we were unable to submit the
1363 * request. So, free up the request right here.
1364 */
1365 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1366 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1367 }
1368 }
1369
1370
1371
1372 /*
1373 * Function name: twa_map_load_callback
1374 * Description: Callback of bus_dmamap_load for the buffer associated
1375 * with a cmd pkt.
1376 *
1377 * Input: arg -- ptr to variable to hold phys addr
1378 * segs -- ptr to a list of segment descriptors
1379 * nsegments--# of segments
1380 * error -- 0 if no errors encountered before callback,
1381 * non-zero if errors were encountered
1382 * Output: None
1383 * Return value: None
1384 */
1385 static TW_VOID
1386 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1387 TW_INT32 nsegments, TW_INT32 error)
1388 {
1389 *((bus_addr_t *)arg) = segs[0].ds_addr;
1390 }
1391
1392
1393
1394 /*
1395 * Function name: tw_osli_map_request
1396 * Description: Maps a cmd pkt and data associated with it, into
1397 * DMA'able memory.
1398 *
1399 * Input: req -- ptr to request pkt
1400 * Output: None
1401 * Return value: 0 -- success
1402 * non-zero-- failure
1403 */
1404 TW_INT32
1405 tw_osli_map_request(struct tw_osli_req_context *req)
1406 {
1407 struct twa_softc *sc = req->ctlr;
1408 TW_INT32 error = 0;
1409
1410 tw_osli_dbg_dprintf(10, sc, "entered");
1411
1412 /* If the command involves data, map that too. */
1413 if (req->data != NULL) {
1414 /*
1415 * It's sufficient for the data pointer to be 4-byte aligned
1416 * to work with 9000. However, if 4-byte aligned addresses
1417 * are passed to bus_dmamap_load, we can get back sg elements
1418 * that are not 512-byte multiples in size. So, we will let
1419 * only those buffers that are 512-byte aligned to pass
1420 * through, and bounce the rest, so as to make sure that we
1421 * always get back sg elements that are 512-byte multiples
1422 * in size.
1423 */
1424 if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1425 (req->length % sc->sg_size_factor)) {
1426 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1427 /* Save original data pointer and length. */
1428 req->real_data = req->data;
1429 req->real_length = req->length;
1430 req->length = (req->length +
1431 (sc->sg_size_factor - 1)) &
1432 ~(sc->sg_size_factor - 1);
1433 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
1434 M_NOWAIT);
1435 if (req->data == NULL) {
1436 tw_osli_printf(sc, "error = %d",
1437 TW_CL_SEVERITY_ERROR_STRING,
1438 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1439 0x201E,
1440 "Failed to allocate memory "
1441 "for bounce buffer",
1442 ENOMEM);
1443 /* Restore original data pointer and length. */
1444 req->data = req->real_data;
1445 req->length = req->real_length;
1446 return(ENOMEM);
1447 }
1448 }
1449
1450 /*
1451 * Map the data buffer into bus space and build the SG list.
1452 */
1453 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1454 /* Lock against multiple simultaneous ioctl calls. */
1455 mtx_lock_spin(sc->io_lock);
1456 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1457 req->data, req->length,
1458 twa_map_load_data_callback, req,
1459 BUS_DMA_WAITOK);
1460 mtx_unlock_spin(sc->io_lock);
1461 } else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
1462 error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
1463 req->orig_req, twa_map_load_data_callback, req,
1464 BUS_DMA_WAITOK);
1465 } else {
1466 /*
1467 * There's only one CAM I/O thread running at a time.
1468 * So, there's no need to hold the io_lock.
1469 */
1470 error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1471 req->data, req->length,
1472 twa_map_load_data_callback, req,
1473 BUS_DMA_WAITOK);
1474 }
1475
1476 if (!error)
1477 error = req->error_code;
1478 else {
1479 if (error == EINPROGRESS) {
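				/*
				 * busdma deferred the mapping and will invoke
				 * the callback later with the S/G list; flag
				 * the request and ask the OSL to hold off new
				 * requests in the meantime.
				 */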
1480 /*
1481 * Specifying sc->io_lock as the lockfuncarg
1482 * in ...tag_create should protect the access
1483 * of ...FLAGS_MAPPED from the callback.
1484 */
1485 mtx_lock_spin(sc->io_lock);
1486 if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
1487 req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1488 tw_osli_disallow_new_requests(sc, &(req->req_handle));
1489 mtx_unlock_spin(sc->io_lock);
1490 error = 0;
1491 } else {
1492 tw_osli_printf(sc, "error = %d",
1493 TW_CL_SEVERITY_ERROR_STRING,
1494 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1495 0x9999,
1496 "Failed to map DMA memory "
1497 "for I/O request",
1498 error);
1499 req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
1500 /* Free alignment buffer if it was used. */
1501 if (req->flags &
1502 TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1503 free(req->data, TW_OSLI_MALLOC_CLASS);
1504 /*
1505 * Restore original data pointer
1506 * and length.
1507 */
1508 req->data = req->real_data;
1509 req->length = req->real_length;
1510 }
1511 }
1512 }
1513
1514 } else {
1515 /* Mark the request as currently being processed. */
1516 req->state = TW_OSLI_REQ_STATE_BUSY;
1517 /* Move the request into the busy queue. */
1518 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1519 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1520 error = tw_cl_fw_passthru(&sc->ctlr_handle,
1521 &(req->req_pkt), &(req->req_handle));
1522 else
1523 error = tw_cl_start_io(&sc->ctlr_handle,
1524 &(req->req_pkt), &(req->req_handle));
1525 if (error) {
1526 req->error_code = error;
1527 req->req_pkt.tw_osl_callback(&(req->req_handle));
1528 }
1529 }
1530 return(error);
1531 }
1532
1533
1534
1535 /*
1536 * Function name: tw_osli_unmap_request
1537 * Description: Undoes the mapping done by tw_osli_map_request.
1538 *
1539 * Input: req -- ptr to request pkt
1540 * Output: None
1541 * Return value: None
1542 */
1543 TW_VOID
1544 tw_osli_unmap_request(struct tw_osli_req_context *req)
1545 {
1546 struct twa_softc *sc = req->ctlr;
1547
1548 tw_osli_dbg_dprintf(10, sc, "entered");
1549
1550 /* If the command involved data, unmap that too. */
1551 if (req->data != NULL) {
1552 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1553 /* Lock against multiple simultaneous ioctl calls. */
1554 mtx_lock_spin(sc->io_lock);
1555
1556 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1557 bus_dmamap_sync(sc->ioctl_tag,
1558 sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1559
1560 /*
1561 * If we are using a bounce buffer, and we are
1562 * reading data, copy the real data in.
1563 */
1564 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1565 bcopy(req->data, req->real_data,
1566 req->real_length);
1567 }
1568
1569 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1570 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1571 BUS_DMASYNC_POSTWRITE);
1572
1573 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1574
1575 mtx_unlock_spin(sc->io_lock);
1576 } else {
1577 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1578 bus_dmamap_sync(sc->dma_tag,
1579 req->dma_map, BUS_DMASYNC_POSTREAD);
1580
1581 /*
1582 * If we are using a bounce buffer, and we are
1583 * reading data, copy the real data in.
1584 */
1585 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1586 bcopy(req->data, req->real_data,
1587 req->real_length);
1588 }
1589 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1590 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1591 BUS_DMASYNC_POSTWRITE);
1592
1593 bus_dmamap_unload(sc->dma_tag, req->dma_map);
1594 }
1595 }
1596
1597 /* Free alignment buffer if it was used. */
1598 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1599 free(req->data, TW_OSLI_MALLOC_CLASS);
1600 /* Restore original data pointer and length. */
1601 req->data = req->real_data;
1602 req->length = req->real_length;
1603 }
1604 }
1605
1606
1607
1608 #ifdef TW_OSL_DEBUG
1609
1610 TW_VOID twa_report_stats(TW_VOID);
1611 TW_VOID twa_reset_stats(TW_VOID);
1612 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1613 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1614
1615
1616 /*
1617 * Function name: twa_report_stats
1618 * Description: For being called from ddb. Calls functions that print
1619 * OSL and CL internal stats for the controller.
1620 *
1621 * Input: None
1622 * Output: None
1623 * Return value: None
1624 */
1625 TW_VOID
1626 twa_report_stats(TW_VOID)
1627 {
1628 struct twa_softc *sc;
1629 TW_INT32 i;
1630
1631 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1632 tw_osli_print_ctlr_stats(sc);
1633 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1634 }
1635 }
1636
1637
1638
1639 /*
1640 * Function name: tw_osli_print_ctlr_stats
1641 * Description: For being called from ddb. Prints OSL controller stats
1642 *
1643 * Input: sc -- ptr to OSL internal controller context
1644 * Output: None
1645 * Return value: None
1646 */
1647 TW_VOID
1648 tw_osli_print_ctlr_stats(struct twa_softc *sc)
1649 {
1650 twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1651 twa_printf(sc, "OSLq type current max\n");
1652 twa_printf(sc, "free %04d %04d\n",
1653 sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1654 sc->q_stats[TW_OSLI_FREE_Q].max_len);
1655 twa_printf(sc, "busy %04d %04d\n",
1656 sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1657 sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1658 }
1659
1660
1661
1662 /*
1663 * Function name: twa_print_req_info
1664 * Description: For being called from ddb. Calls functions that print
1665 * OSL and CL internal details for the request.
1666 *
1667 * Input: req -- ptr to OSL internal request context
1668 * Output: None
1669 * Return value: None
1670 */
1671 TW_VOID
1672 twa_print_req_info(struct tw_osli_req_context *req)
1673 {
1674 struct twa_softc *sc = req->ctlr;
1675
1676 twa_printf(sc, "OSL details for request:\n");
1677 twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1678 "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1679 "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1680 "next_req = %p, prev_req = %p, dma_map = %p\n",
1681 req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1682 req->data, req->length, req->real_data, req->real_length,
1683 req->state, req->flags, req->error_code, req->orig_req,
1684 req->link.next, req->link.prev, req->dma_map);
1685 tw_cl_print_req_info(&(req->req_handle));
1686 }
1687
1688
1689
1690 /*
1691 * Function name: twa_reset_stats
1692 * Description: For being called from ddb.
1693 * Resets some OSL controller stats.
1694 *
1695 * Input: None
1696 * Output: None
1697 * Return value: None
1698 */
1699 TW_VOID
1700 twa_reset_stats(TW_VOID)
1701 {
1702 struct twa_softc *sc;
1703 TW_INT32 i;
1704
1705 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1706 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1707 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1708 tw_cl_reset_stats(&sc->ctlr_handle);
1709 }
1710 }
1711
1712 #endif /* TW_OSL_DEBUG */