1 /*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 /*
33 * AMCC's 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 * Modifications by: Adam Radford
37 */
38
39
40 /*
41 * FreeBSD specific functions not related to CAM, and other
42 * miscellaneous functions.
43 */
44
45
46 #include <dev/twa/tw_osl_includes.h>
47 #include <dev/twa/tw_cl_fwif.h>
48 #include <dev/twa/tw_cl_ioctl.h>
49 #include <dev/twa/tw_osl_ioctl.h>
50
51 #ifdef TW_OSL_DEBUG
52 TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
53 TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
54 #endif /* TW_OSL_DEBUG */
55
56 MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
57
58
59 static d_open_t twa_open;
60 static d_close_t twa_close;
61 static d_ioctl_t twa_ioctl;
62
63 static struct cdevsw twa_cdevsw = {
64 .d_version = D_VERSION,
65 .d_open = twa_open,
66 .d_close = twa_close,
67 .d_ioctl = twa_ioctl,
68 .d_name = "twa",
69 };
70
71 static devclass_t twa_devclass;
72
73
74 /*
75 * Function name: twa_open
76 * Description: Called when the controller is opened.
77 * Simply marks the controller as open.
78 *
79 * Input: dev -- control device corresponding to the ctlr
80 * flags -- mode of open
81 * fmt -- device type (character/block etc.)
82 * proc -- current process
83 * Output: None
84 * Return value: 0 -- success
85 * non-zero-- failure
86 */
87 static TW_INT32
88 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
89 {
90 TW_INT32 unit = minor(dev);
91 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
92
93 tw_osli_dbg_dprintf(5, sc, "entered");
94 sc->state |= TW_OSLI_CTLR_STATE_OPEN;
95 return(0);
96 }
97
98
99
100 /*
101 * Function name: twa_close
102 * Description: Called when the controller is closed.
103 * Simply marks the controller as not open.
104 *
105 * Input: dev -- control device corresponding to the ctlr
106 * flags -- mode of corresponding open
107 * fmt -- device type (character/block etc.)
108 * proc -- current process
109 * Output: None
110 * Return value: 0 -- success
111 * non-zero-- failure
112 */
113 static TW_INT32
114 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
115 {
116 TW_INT32 unit = minor(dev);
117 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
118
119 tw_osli_dbg_dprintf(5, sc, "entered");
120 sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
121 return(0);
122 }
123
124
125
126 /*
127 * Function name: twa_ioctl
128 * Description: Called when an ioctl is posted to the controller.
129 * Handles any OS Layer specific cmds, passes the rest
130 * on to the Common Layer.
131 *
132 * Input: dev -- control device corresponding to the ctlr
133 * cmd -- ioctl cmd
134 * buf -- ptr to buffer in kernel memory, which is
135 * a copy of the input buffer in user-space
136 * flags -- mode of corresponding open
137 * proc -- current process
138 * Output: buf -- ptr to buffer in kernel memory, which will
139 * be copied to the output buffer in user-space
140 * Return value: 0 -- success
141 * non-zero-- failure
142 */
143 static TW_INT32
144 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
145 {
146 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
147 TW_INT32 error;
148
149 tw_osli_dbg_dprintf(5, sc, "entered");
150
151 switch (cmd) {
152 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
153 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
154 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
155 break;
156
157 case TW_OSL_IOCTL_SCAN_BUS:
158 /* Request CAM for a bus scan. */
159 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
160 error = tw_osli_request_bus_scan(sc);
161 break;
162
163 default:
164 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
165 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
166 break;
167 }
168 return(error);
169 }
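
/*
 * A minimal userland usage sketch for the control node created in
 * twa_attach() ("/dev/twa<unit>"). The exact command encodings and
 * argument layouts come from <dev/twa/tw_osl_ioctl.h>; "arg" here is
 * only a placeholder for whatever structure the chosen command expects.
 *
 *	int fd = open("/dev/twa0", O_RDWR);
 *	if (fd != -1) {
 *		if (ioctl(fd, TW_OSL_IOCTL_SCAN_BUS, &arg) == -1)
 *			warn("TW_OSL_IOCTL_SCAN_BUS");
 *		close(fd);
 *	}
 */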
170
171
172
173 static TW_INT32 twa_probe(device_t dev);
174 static TW_INT32 twa_attach(device_t dev);
175 static TW_INT32 twa_detach(device_t dev);
176 static TW_INT32 twa_shutdown(device_t dev);
177 static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
178 static TW_VOID twa_pci_intr(TW_VOID *arg);
179 #ifdef TW_OSLI_DEFERRED_INTR_USED
180 static TW_VOID twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
181 #endif /* TW_OSLI_DEFERRED_INTR_USED */
182
183 static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
184 static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
185
186 static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
187 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
188 static TW_VOID twa_map_load_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190
191
192 static device_method_t twa_methods[] = {
193 /* Device interface */
194 DEVMETHOD(device_probe, twa_probe),
195 DEVMETHOD(device_attach, twa_attach),
196 DEVMETHOD(device_detach, twa_detach),
197 DEVMETHOD(device_shutdown, twa_shutdown),
198
199 DEVMETHOD(bus_print_child, bus_generic_print_child),
200 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
201 {0, 0}
202 };
203
204 static driver_t twa_pci_driver = {
205 "twa",
206 twa_methods,
207 sizeof(struct twa_softc)
208 };
209
210 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
211 MODULE_DEPEND(twa, cam, 1, 1, 1);
212 MODULE_DEPEND(twa, pci, 1, 1, 1);
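
/*
 * When built as a module, the driver can typically be loaded at boot by
 * adding twa_load="YES" to /boot/loader.conf, or at runtime with
 * "kldload twa"; the MODULE_DEPEND() declarations above make the kernel
 * linker pull in cam and pci as needed.
 */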
213
214
215 /*
216 * Function name: twa_probe
217 * Description: Called at driver load time. Claims 9000 ctlrs.
218 *
219 * Input: dev -- bus device corresponding to the ctlr
220 * Output: None
221 * Return value: <= 0 -- success
222 * > 0 -- failure
223 */
224 static TW_INT32
225 twa_probe(device_t dev)
226 {
227 static TW_UINT8 first_ctlr = 1;
228
229 tw_osli_dbg_printf(3, "entered");
230
231 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
232 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
233 /* Print the driver version only once. */
234 if (first_ctlr) {
235 printf("3ware device driver for 9000 series storage "
236 "controllers, version: %s\n",
237 TW_OSL_DRIVER_VERSION_STRING);
238 first_ctlr = 0;
239 }
240 return(0);
241 }
242 return(ENXIO);
243 }
244
245
246
247 /*
248 * Function name: twa_attach
249 * Description: Allocates pci resources; updates sc; adds a node to the
250 * sysctl tree to expose the driver version; makes calls
251 * (to the Common Layer) to initialize ctlr, and to
252 * attach to CAM.
253 *
254 * Input: dev -- bus device corresponding to the ctlr
255 * Output: None
256 * Return value: 0 -- success
257 * non-zero-- failure
258 */
259 static TW_INT32
260 twa_attach(device_t dev)
261 {
262 struct twa_softc *sc = device_get_softc(dev);
263 TW_UINT32 command;
264 TW_INT32 bar_num;
265 TW_INT32 bar0_offset;
266 TW_INT32 bar_size;
267 TW_INT32 error;
268
269 tw_osli_dbg_dprintf(3, sc, "entered");
270
271 sc->ctlr_handle.osl_ctlr_ctxt = sc;
272
273 /* Initialize the softc structure. */
274 sc->bus_dev = dev;
275 sc->device_id = pci_get_device(dev);
276
277 /* Initialize the mutexes right here. */
278 sc->io_lock = &(sc->io_lock_handle);
279 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
280 sc->q_lock = &(sc->q_lock_handle);
281 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
282
283 sysctl_ctx_init(&sc->sysctl_ctxt);
284 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
285 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
286 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
287 if (sc->sysctl_tree == NULL) {
288 tw_osli_printf(sc, "error = %d",
289 TW_CL_SEVERITY_ERROR_STRING,
290 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
291 0x2000,
292 "Cannot add sysctl tree node",
293 ENXIO);
294 return(ENXIO);
295 }
296 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
297 OID_AUTO, "driver_version", CTLFLAG_RD,
298 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
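/*
 * The read-only node added above appears as hw.twa<unit>.driver_version;
 * e.g. "sysctl hw.twa0.driver_version" prints the version string.
 */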
299
300 /* Make sure we are going to be able to talk to this board. */
301 command = pci_read_config(dev, PCIR_COMMAND, 2);
302 if ((command & PCIM_CMD_PORTEN) == 0) {
303 tw_osli_printf(sc, "error = %d",
304 TW_CL_SEVERITY_ERROR_STRING,
305 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
306 0x2001,
307 "Register window not available",
308 ENXIO);
309 tw_osli_free_resources(sc);
310 return(ENXIO);
311 }
312
313 /* Force the busmaster enable bit on, in case the BIOS forgot. */
314 command |= PCIM_CMD_BUSMASTEREN;
315 pci_write_config(dev, PCIR_COMMAND, command, 2);
316
317 /* Allocate the PCI register window. */
318 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
319 &bar_num, &bar0_offset, &bar_size))) {
320 tw_osli_printf(sc, "error = %d",
321 TW_CL_SEVERITY_ERROR_STRING,
322 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
323 0x201F,
324 "Can't get PCI BAR info",
325 error);
326 tw_osli_free_resources(sc);
327 return(error);
328 }
329 sc->reg_res_id = PCIR_BARS + bar0_offset;
330 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
331 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
332 == NULL) {
333 tw_osli_printf(sc, "error = %d",
334 TW_CL_SEVERITY_ERROR_STRING,
335 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
336 0x2002,
337 "Can't allocate register window",
338 ENXIO);
339 tw_osli_free_resources(sc);
340 return(ENXIO);
341 }
342 sc->bus_tag = rman_get_bustag(sc->reg_res);
343 sc->bus_handle = rman_get_bushandle(sc->reg_res);
344
345 /* Allocate and register our interrupt. */
346 sc->irq_res_id = 0;
347 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
348 &(sc->irq_res_id), 0, ~0, 1,
349 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
350 tw_osli_printf(sc, "error = %d",
351 TW_CL_SEVERITY_ERROR_STRING,
352 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
353 0x2003,
354 "Can't allocate interrupt",
355 ENXIO);
356 tw_osli_free_resources(sc);
357 return(ENXIO);
358 }
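/*
 * On SMP systems the handler is registered INTR_MPSAFE (and, when
 * deferred interrupts are compiled in, also INTR_FAST); on UP systems
 * it is registered as a plain INTR_TYPE_CAM handler.
 */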
359 if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
360 ((mp_ncpus > 1) ? (INTR_MPSAFE
361 #ifdef TW_OSLI_DEFERRED_INTR_USED
362 | INTR_FAST
363 #endif /* TW_OSLI_DEFERRED_INTR_USED */
364 ) : 0) | INTR_TYPE_CAM,
365 twa_pci_intr, sc, &sc->intr_handle))) {
366 tw_osli_printf(sc, "error = %d",
367 TW_CL_SEVERITY_ERROR_STRING,
368 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
369 0x2004,
370 "Can't set up interrupt",
371 error);
372 tw_osli_free_resources(sc);
373 return(error);
374 }
375
376 #ifdef TW_OSLI_DEFERRED_INTR_USED
377 TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
378 #endif /* TW_OSLI_DEFERRED_INTR_USED */
379
380 if ((error = tw_osli_alloc_mem(sc))) {
381 tw_osli_printf(sc, "error = %d",
382 TW_CL_SEVERITY_ERROR_STRING,
383 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
384 0x2005,
385 "Memory allocation failure",
386 error);
387 tw_osli_free_resources(sc);
388 return(error);
389 }
390
391 /* Initialize the Common Layer for this controller. */
392 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
393 TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
394 sc->non_dma_mem, sc->dma_mem,
395 sc->dma_mem_phys
396 ))) {
397 tw_osli_printf(sc, "error = %d",
398 TW_CL_SEVERITY_ERROR_STRING,
399 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
400 0x2006,
401 "Failed to initialize Common Layer/controller",
402 error);
403 tw_osli_free_resources(sc);
404 return(error);
405 }
406
407 /* Create the control device. */
408 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
409 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
410 "twa%d", device_get_unit(sc->bus_dev));
411 sc->ctrl_dev->si_drv1 = sc;
412
413 if ((error = tw_osli_cam_attach(sc))) {
414 tw_osli_free_resources(sc);
415 tw_osli_printf(sc, "error = %d",
416 TW_CL_SEVERITY_ERROR_STRING,
417 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
418 0x2007,
419 "Failed to initialize CAM",
420 error);
421 return(error);
422 }
423
424 return(0);
425 }
426
427
428
429 /*
430 * Function name: tw_osli_alloc_mem
431 * Description: Allocates memory needed both by CL and OSL.
432 *
433 * Input: sc -- OSL internal controller context
434 * Output: None
435 * Return value: 0 -- success
436 * non-zero-- failure
437 */
438 static TW_INT32
439 tw_osli_alloc_mem(struct twa_softc *sc)
440 {
441 struct tw_osli_req_context *req;
442 TW_UINT32 max_sg_elements;
443 TW_UINT32 non_dma_mem_size;
444 TW_UINT32 dma_mem_size;
445 TW_INT32 error;
446 TW_INT32 i;
447
448 tw_osli_dbg_dprintf(3, sc, "entered");
449
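/*
 * On LP64 platforms bus_addr_t and bus_size_t are 8 bytes wide, so the
 * Common Layer is told to use 64-bit addresses and SG lengths, and the
 * larger 64-bit scatter/gather element limit is used below.
 */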
450 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
451 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
452 #ifdef TW_OSLI_DEFERRED_INTR_USED
453 sc->flags |= TW_CL_DEFERRED_INTR_USED;
454 #endif /* TW_OSLI_DEFERRED_INTR_USED */
455
456 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
457 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
458
459 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
460 sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
461 &(sc->alignment), &(sc->sg_size_factor),
462 &non_dma_mem_size, &dma_mem_size
463 ))) {
464 tw_osli_printf(sc, "error = %d",
465 TW_CL_SEVERITY_ERROR_STRING,
466 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
467 0x2008,
468 "Can't get Common Layer's memory requirements",
469 error);
470 return(error);
471 }
472
473 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
474 M_WAITOK)) == NULL) {
475 tw_osli_printf(sc, "error = %d",
476 TW_CL_SEVERITY_ERROR_STRING,
477 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
478 0x2009,
479 "Can't allocate non-dma memory",
480 ENOMEM);
481 return(ENOMEM);
482 }
483
484 /* Create the parent dma tag. */
485 if (bus_dma_tag_create(NULL, /* parent */
486 sc->alignment, /* alignment */
487 TW_OSLI_DMA_BOUNDARY, /* boundary */
488 BUS_SPACE_MAXADDR, /* lowaddr */
489 BUS_SPACE_MAXADDR, /* highaddr */
490 NULL, NULL, /* filter, filterarg */
491 TW_CL_MAX_IO_SIZE, /* maxsize */
492 max_sg_elements, /* nsegments */
493 TW_CL_MAX_IO_SIZE, /* maxsegsize */
494 0, /* flags */
495 NULL, /* lockfunc */
496 NULL, /* lockfuncarg */
497 &sc->parent_tag /* tag */)) {
498 tw_osli_printf(sc, "error = %d",
499 TW_CL_SEVERITY_ERROR_STRING,
500 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
501 0x200A,
502 "Can't allocate parent DMA tag",
503 ENOMEM);
504 return(ENOMEM);
505 }
506
507 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
508 if (bus_dma_tag_create(sc->parent_tag, /* parent */
509 sc->alignment, /* alignment */
510 0, /* boundary */
511 BUS_SPACE_MAXADDR, /* lowaddr */
512 BUS_SPACE_MAXADDR, /* highaddr */
513 NULL, NULL, /* filter, filterarg */
514 dma_mem_size, /* maxsize */
515 1, /* nsegments */
516 BUS_SPACE_MAXSIZE, /* maxsegsize */
517 0, /* flags */
518 NULL, /* lockfunc */
519 NULL, /* lockfuncarg */
520 &sc->cmd_tag /* tag */)) {
521 tw_osli_printf(sc, "error = %d",
522 TW_CL_SEVERITY_ERROR_STRING,
523 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
524 0x200B,
525 "Can't allocate DMA tag for Common Layer's "
526 "DMA'able memory",
527 ENOMEM);
528 return(ENOMEM);
529 }
530
531 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
532 BUS_DMA_NOWAIT, &sc->cmd_map)) {
533 /* Try a second time. */
534 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
535 BUS_DMA_NOWAIT, &sc->cmd_map)) {
536 tw_osli_printf(sc, "error = %d",
537 TW_CL_SEVERITY_ERROR_STRING,
538 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
539 0x200C,
540 "Can't allocate DMA'able memory for the "
541 "Common Layer",
542 ENOMEM);
543 return(ENOMEM);
544 }
545 }
546
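/*
 * Load the Common Layer's DMA'able memory; twa_map_load_callback()
 * records the bus address of the single segment in sc->dma_mem_phys,
 * which is later handed to tw_cl_init_ctlr().
 */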
547 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
548 dma_mem_size, twa_map_load_callback,
549 &sc->dma_mem_phys, 0);
550
551 /*
552 * Create a dma tag for data buffers; size will be the maximum
553 * possible I/O size (128kB).
554 */
555 if (bus_dma_tag_create(sc->parent_tag, /* parent */
556 sc->alignment, /* alignment */
557 0, /* boundary */
558 BUS_SPACE_MAXADDR, /* lowaddr */
559 BUS_SPACE_MAXADDR, /* highaddr */
560 NULL, NULL, /* filter, filterarg */
561 TW_CL_MAX_IO_SIZE, /* maxsize */
562 max_sg_elements, /* nsegments */
563 TW_CL_MAX_IO_SIZE, /* maxsegsize */
564 BUS_DMA_ALLOCNOW, /* flags */
565 twa_busdma_lock, /* lockfunc */
566 sc->io_lock, /* lockfuncarg */
567 &sc->dma_tag /* tag */)) {
568 tw_osli_printf(sc, "error = %d",
569 TW_CL_SEVERITY_ERROR_STRING,
570 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
571 0x200F,
572 "Can't allocate DMA tag for data buffers",
573 ENOMEM);
574 return(ENOMEM);
575 }
576
577 /*
578 * Create a dma tag for ioctl data buffers; size will be the maximum
579 * possible I/O size (128kB).
580 */
581 if (bus_dma_tag_create(sc->parent_tag, /* parent */
582 sc->alignment, /* alignment */
583 0, /* boundary */
584 BUS_SPACE_MAXADDR, /* lowaddr */
585 BUS_SPACE_MAXADDR, /* highaddr */
586 NULL, NULL, /* filter, filterarg */
587 TW_CL_MAX_IO_SIZE, /* maxsize */
588 max_sg_elements, /* nsegments */
589 TW_CL_MAX_IO_SIZE, /* maxsegsize */
590 BUS_DMA_ALLOCNOW, /* flags */
591 twa_busdma_lock, /* lockfunc */
592 sc->io_lock, /* lockfuncarg */
593 &sc->ioctl_tag /* tag */)) {
594 tw_osli_printf(sc, "error = %d",
595 TW_CL_SEVERITY_ERROR_STRING,
596 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
597 0x2010,
598 "Can't allocate DMA tag for ioctl data buffers",
599 ENOMEM);
600 return(ENOMEM);
601 }
602
603 /* Create just one map for all ioctl request data buffers. */
604 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
605 tw_osli_printf(sc, "error = %d",
606 TW_CL_SEVERITY_ERROR_STRING,
607 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
608 0x2011,
609 "Can't create ioctl map",
610 ENOMEM);
611 return(ENOMEM);
612 }
613
614
615 /* Initialize request queues. */
616 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
617 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
618
619 if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
620 malloc((sizeof(struct tw_osli_req_context) *
621 TW_OSLI_MAX_NUM_IOS),
622 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
623 tw_osli_printf(sc, "error = %d",
624 TW_CL_SEVERITY_ERROR_STRING,
625 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
626 0x2012,
627 "Failed to allocate request packets",
628 ENOMEM);
629 return(ENOMEM);
630 }
631 bzero(sc->req_ctxt_buf,
632 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);
633
634 for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
635 req = &(sc->req_ctxt_buf[i]);
636 req->ctlr = sc;
637 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
638 tw_osli_printf(sc, "request # = %d, error = %d",
639 TW_CL_SEVERITY_ERROR_STRING,
640 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
641 0x2013,
642 "Can't create dma map",
643 i, ENOMEM);
644 return(ENOMEM);
645 }
646
647 /* Insert request into the free queue. */
648 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
649 }
650
651 return(0);
652 }
653
654
655
656 /*
657 * Function name: tw_osli_free_resources
658 * Description: Performs clean-up when the controller goes away (on detach or a failed attach).
659 *
660 * Input: sc -- ptr to OSL internal ctlr context
661 * Output: None
662 * Return value: None
663 */
664 static TW_VOID
665 tw_osli_free_resources(struct twa_softc *sc)
666 {
667 struct tw_osli_req_context *req;
668 TW_INT32 error = 0;
669
670 tw_osli_dbg_dprintf(3, sc, "entered");
671
672 /* Detach from CAM */
673 tw_osli_cam_detach(sc);
674
675 if (sc->req_ctxt_buf)
676 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
677 NULL)
678 if ((error = bus_dmamap_destroy(sc->dma_tag,
679 req->dma_map)))
680 tw_osli_dbg_dprintf(1, sc,
681 "dmamap_destroy(dma) returned %d",
682 error);
683
684 if ((sc->ioctl_tag) && (sc->ioctl_map))
685 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
686 tw_osli_dbg_dprintf(1, sc,
687 "dmamap_destroy(ioctl) returned %d", error);
688
689 /* Free all memory allocated so far. */
690 if (sc->req_ctxt_buf)
691 free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);
692
693 if (sc->non_dma_mem)
694 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
695
696 if (sc->dma_mem) {
697 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
698 bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
699 sc->cmd_map);
700 }
701 if (sc->cmd_tag)
702 if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
703 tw_osli_dbg_dprintf(1, sc,
704 "dma_tag_destroy(cmd) returned %d", error);
705
706 if (sc->dma_tag)
707 if ((error = bus_dma_tag_destroy(sc->dma_tag)))
708 tw_osli_dbg_dprintf(1, sc,
709 "dma_tag_destroy(dma) returned %d", error);
710
711 if (sc->ioctl_tag)
712 if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
713 tw_osli_dbg_dprintf(1, sc,
714 "dma_tag_destroy(ioctl) returned %d", error);
715
716 if (sc->parent_tag)
717 if ((error = bus_dma_tag_destroy(sc->parent_tag)))
718 tw_osli_dbg_dprintf(1, sc,
719 "dma_tag_destroy(parent) returned %d", error);
720
721
722 /* Disconnect the interrupt handler. */
723 if (sc->intr_handle)
724 if ((error = bus_teardown_intr(sc->bus_dev,
725 sc->irq_res, sc->intr_handle)))
726 tw_osli_dbg_dprintf(1, sc,
727 "teardown_intr returned %d", error);
728
729 if (sc->irq_res != NULL)
730 if ((error = bus_release_resource(sc->bus_dev,
731 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
732 tw_osli_dbg_dprintf(1, sc,
733 "release_resource(irq) returned %d", error);
734
735
736 /* Release the register window mapping. */
737 if (sc->reg_res != NULL)
738 if ((error = bus_release_resource(sc->bus_dev,
739 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
740 tw_osli_dbg_dprintf(1, sc,
741 "release_resource(io) returned %d", error);
742
743
744 /* Destroy the control device. */
745 if (sc->ctrl_dev != (struct cdev *)NULL)
746 destroy_dev(sc->ctrl_dev);
747
748 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
749 tw_osli_dbg_dprintf(1, sc,
750 "sysctl_ctx_free returned %d", error);
751
752 }
753
754
755
756 /*
757 * Function name: twa_detach
758 * Description: Called when the controller is being detached from
759 * the pci bus.
760 *
761 * Input: dev -- bus device corresponding to the ctlr
762 * Output: None
763 * Return value: 0 -- success
764 * non-zero-- failure
765 */
766 static TW_INT32
767 twa_detach(device_t dev)
768 {
769 struct twa_softc *sc = device_get_softc(dev);
770 TW_INT32 error;
771
772 tw_osli_dbg_dprintf(3, sc, "entered");
773
774 error = EBUSY;
775 if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
776 tw_osli_printf(sc, "error = %d",
777 TW_CL_SEVERITY_ERROR_STRING,
778 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
779 0x2014,
780 "Device open",
781 error);
782 goto out;
783 }
784
785 /* Shut the controller down. */
786 if ((error = twa_shutdown(dev)))
787 goto out;
788
789 /* Free all resources associated with this controller. */
790 tw_osli_free_resources(sc);
791 error = 0;
792
793 out:
794 return(error);
795 }
796
797
798
799 /*
800 * Function name: twa_shutdown
801 * Description: Called at unload/shutdown time. Lets the controller
802 * know that we are going down.
803 *
804 * Input: dev -- bus device corresponding to the ctlr
805 * Output: None
806 * Return value: 0 -- success
807 * non-zero-- failure
808 */
809 static TW_INT32
810 twa_shutdown(device_t dev)
811 {
812 struct twa_softc *sc = device_get_softc(dev);
813 TW_INT32 error = 0;
814
815 tw_osli_dbg_dprintf(3, sc, "entered");
816
817 /* Disconnect from the controller. */
818 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
819 tw_osli_printf(sc, "error = %d",
820 TW_CL_SEVERITY_ERROR_STRING,
821 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
822 0x2015,
823 "Failed to shutdown Common Layer/controller",
824 error);
825 }
826 return(error);
827 }
828
829
830
831 /*
832 * Function name: twa_busdma_lock
833 * Description: Function to provide synchronization during busdma_swi.
834 *
835 * Input: lock_arg -- lock mutex sent as argument
836 * op -- operation (lock/unlock) expected of the function
837 * Output: None
838 * Return value: None
839 */
840 TW_VOID
841 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
842 {
843 struct mtx *lock;
844
845 lock = (struct mtx *)lock_arg;
846 switch (op) {
847 case BUS_DMA_LOCK:
848 mtx_lock_spin(lock);
849 break;
850
851 case BUS_DMA_UNLOCK:
852 mtx_unlock_spin(lock);
853 break;
854
855 default:
856 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
857 }
858 }
859
860
861
862 /*
863 * Function name: twa_pci_intr
864 * Description: Interrupt handler. Wrapper for twa_interrupt.
865 *
866 * Input: arg -- ptr to OSL internal ctlr context
867 * Output: None
868 * Return value: None
869 */
870 static TW_VOID
871 twa_pci_intr(TW_VOID *arg)
872 {
873 struct twa_softc *sc = (struct twa_softc *)arg;
874
875 tw_osli_dbg_dprintf(10, sc, "entered");
876 if (tw_cl_interrupt(&(sc->ctlr_handle)))
877 #ifdef TW_OSLI_DEFERRED_INTR_USED
878 taskqueue_enqueue_fast(taskqueue_fast,
879 &(sc->deferred_intr_callback));
880 #else /* TW_OSLI_DEFERRED_INTR_USED */
881 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
882 #endif /* TW_OSLI_DEFERRED_INTR_USED */
883 }
884
885
886
887 #ifdef TW_OSLI_DEFERRED_INTR_USED
888
889 /*
890 * Function name: twa_deferred_intr
891 * Description: Deferred interrupt handler.
892 *
893 * Input: context -- ptr to OSL internal ctlr context
894 * pending -- not used
895 * Output: None
896 * Return value: None
897 */
898 static TW_VOID
899 twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
900 {
901 struct twa_softc *sc = (struct twa_softc *)context;
902
903 tw_osli_dbg_dprintf(10, sc, "entered");
904
905 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
906 }
907
908 #endif /* TW_OSLI_DEFERRED_INTR_USED */
909
910
911
912 /*
913 * Function name: tw_osli_fw_passthru
914 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
915 *
916 * Input: sc -- ptr to OSL internal ctlr context
917 * buf -- ptr to ioctl pkt understood by CL
918 * Output: None
919 * Return value: 0 -- success
920 * non-zero-- failure
921 */
922 TW_INT32
923 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
924 {
925 struct tw_osli_req_context *req;
926 struct tw_osli_ioctl_no_data_buf *user_buf =
927 (struct tw_osli_ioctl_no_data_buf *)buf;
928 TW_TIME end_time;
929 TW_UINT32 timeout = 60;
930 TW_UINT32 data_buf_size_adjusted;
931 struct tw_cl_req_packet *req_pkt;
932 struct tw_cl_passthru_req_packet *pt_req;
933 TW_INT32 error;
934
935 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
936
937 if ((req = tw_osli_get_request(sc)) == NULL)
938 return(EBUSY);
939
940 req->req_handle.osl_req_ctxt = req;
941 req->orig_req = buf;
942 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
943
944 req_pkt = &(req->req_pkt);
945 req_pkt->status = 0;
946 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
947 /* Let the Common Layer retry the request on cmd queue full. */
948 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
949
950 pt_req = &(req_pkt->gen_req_pkt.pt_req);
951 /*
952 * Make sure that the data buffer sent to firmware is a
953 * 512 byte multiple in size.
954 */
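/*
 * E.g., with a 512-byte sg_size_factor, a 1000-byte user buffer is
 * rounded up to 1024 bytes.
 */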
955 data_buf_size_adjusted =
956 (user_buf->driver_pkt.buffer_length +
957 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
958 if ((req->length = data_buf_size_adjusted)) {
959 if ((req->data = malloc(data_buf_size_adjusted,
960 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
961 error = ENOMEM;
962 tw_osli_printf(sc, "error = %d",
963 TW_CL_SEVERITY_ERROR_STRING,
964 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
965 0x2016,
966 "Could not alloc mem for "
967 "fw_passthru data_buf",
968 error);
969 goto fw_passthru_err;
970 }
971 /* Copy the payload. */
972 if ((error = copyin((TW_VOID *)(user_buf->pdata),
973 req->data,
974 user_buf->driver_pkt.buffer_length)) != 0) {
975 tw_osli_printf(sc, "error = %d",
976 TW_CL_SEVERITY_ERROR_STRING,
977 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
978 0x2017,
979 "Could not copyin fw_passthru data_buf",
980 error);
981 goto fw_passthru_err;
982 }
983 pt_req->sgl_entries = 1; /* will be updated during mapping */
984 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
985 TW_OSLI_REQ_FLAGS_DATA_OUT);
986 } else
987 pt_req->sgl_entries = 0; /* no payload */
988
989 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
990 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
991
992 if ((error = tw_osli_map_request(req)))
993 goto fw_passthru_err;
994
995 end_time = tw_osl_get_local_time() + timeout;
996 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
997 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
998
999 error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);
1000
1001 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1002 error = 0;
1003 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1004
1005 if (! error) {
1006 if (((error = req->error_code)) ||
1007 ((error = (req->state !=
1008 TW_OSLI_REQ_STATE_COMPLETE))) ||
1009 ((error = req_pkt->status)))
1010 goto fw_passthru_err;
1011 break;
1012 }
1013
1014 if (req_pkt->status) {
1015 error = req_pkt->status;
1016 goto fw_passthru_err;
1017 }
1018
1019 if (error == EWOULDBLOCK) {
1020 /* Time out! */
1021 tw_osli_printf(sc, "request = %p",
1022 TW_CL_SEVERITY_ERROR_STRING,
1023 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1024 0x2018,
1025 "Passthru request timed out!",
1026 req);
1027 /*
1028 * Should I check here if the timeout happened
1029 * because of yet another reset, and not do a
1030 * second reset?
1031 */
1032 tw_cl_reset_ctlr(&sc->ctlr_handle);
1033 /*
1034 * Don't touch req after a reset. It (and any
1035 * associated data) will already have been
1036 * unmapped by the callback.
1037 */
1038 user_buf->driver_pkt.os_status = error;
1039 error = ETIMEDOUT;
1040 goto fw_passthru_err;
1041 }
1042 /*
1043 * Either the request got completed, or we were woken up by a
1044 * signal. Calculate the new timeout, in case it was the latter.
1045 */
1046 timeout = (end_time - tw_osl_get_local_time());
1047 }
1048
1049 /* If there was a payload, copy it back. */
1050 if ((!error) && (req->length))
1051 if ((error = copyout(req->data, user_buf->pdata,
1052 user_buf->driver_pkt.buffer_length)))
1053 tw_osli_printf(sc, "error = %d",
1054 TW_CL_SEVERITY_ERROR_STRING,
1055 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1056 0x2019,
1057 "Could not copyout fw_passthru data_buf",
1058 error);
1059
1060 fw_passthru_err:
1061 /*
1062 * Print the failure message. For some reason, on certain OS versions,
1063 * printing this error message during reset hangs the display (although
1064 * the rest of the system is running fine). So, don't print it if the
1065 * failure was due to a reset.
1066 */
1067 if ((error) && (error != TW_CL_ERR_REQ_BUS_RESET))
1068 tw_osli_printf(sc, "error = %d",
1069 TW_CL_SEVERITY_ERROR_STRING,
1070 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1071 0x201A,
1072 "Firmware passthru failed!",
1073 error);
1074
1075 user_buf->driver_pkt.os_status = error;
1076 /* Free resources. */
1077 if (req->data)
1078 free(req->data, TW_OSLI_MALLOC_CLASS);
1079 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1080 return(error);
1081 }
1082
1083
1084
1085 /*
1086 * Function name: tw_osl_complete_passthru
1087 * Description: Called to complete passthru requests.
1088 *
1089 * Input: req_handle -- ptr to request handle
1090 * Output: None
1091 * Return value: None
1092 */
1093 TW_VOID
1094 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1095 {
1096 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1097 struct twa_softc *sc = req->ctlr;
1098
1099 tw_osli_dbg_dprintf(5, sc, "entered");
1100
1101 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1102 tw_osli_printf(sc, "request = %p, status = %d",
1103 TW_CL_SEVERITY_ERROR_STRING,
1104 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1105 0x201B,
1106 "Unposted command completed!!",
1107 req, req->state);
1108 }
1109
1110 /*
1111 * Remove request from the busy queue. Just mark it complete.
1112 * There's no need to move it into the complete queue as we are
1113 * going to be done with it right now.
1114 */
1115 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1116 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1117
1118 tw_osli_unmap_request(req);
1119
1120 /*
1121 * Don't do a wake up if there was an error even before the request
1122 * was sent down to the Common Layer, and we hadn't gotten an
1123 * EINPROGRESS. The request originator will then be returned an
1124 * error, and can do the clean-up itself.
1125 */
1126 if ((req->error_code) &&
1127 (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1128 return;
1129
1130 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1131 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1132 /* Wake up the sleeping command originator. */
1133 tw_osli_dbg_dprintf(5, sc,
1134 "Waking up originator of request %p", req);
1135 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1136 wakeup_one(req);
1137 } else {
1138 /*
1139 * If the request completed even before tsleep
1140 * was called, simply return.
1141 */
1142 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1143 return;
1144
1145 tw_osli_printf(sc, "request = %p",
1146 TW_CL_SEVERITY_ERROR_STRING,
1147 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1148 0x201C,
1149 "Passthru callback called, "
1150 "and caller not sleeping",
1151 req);
1152 }
1153 } else {
1154 tw_osli_printf(sc, "request = %p",
1155 TW_CL_SEVERITY_ERROR_STRING,
1156 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1157 0x201D,
1158 "Passthru callback called for non-passthru request",
1159 req);
1160 }
1161 }
1162
1163
1164
1165 /*
1166 * Function name: tw_osli_get_request
1167 * Description: Gets a request pkt from the free queue.
1168 *
1169 * Input: sc -- ptr to OSL internal ctlr context
1170 * Output: None
1171 * Return value: ptr to request pkt -- success
1172 * NULL -- failure
1173 */
1174 struct tw_osli_req_context *
1175 tw_osli_get_request(struct twa_softc *sc)
1176 {
1177 struct tw_osli_req_context *req;
1178
1179 tw_osli_dbg_dprintf(4, sc, "entered");
1180
1181 /* Get a free request packet. */
1182 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1183
1184 /* Initialize some fields to their defaults. */
1185 if (req) {
1186 req->req_handle.osl_req_ctxt = NULL;
1187 req->req_handle.cl_req_ctxt = NULL;
1188 req->data = NULL;
1189 req->length = 0;
1190 req->real_data = NULL;
1191 req->real_length = 0;
1192 req->state = TW_OSLI_REQ_STATE_INIT; /* req being initialized */
1193 req->flags = 0;
1194 req->error_code = 0;
1195 req->orig_req = NULL;
1196
1197 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1198
1199 }
1200 return(req);
1201 }
1202
1203
1204
1205 /*
1206 * Function name: twa_map_load_data_callback
1207 * Description: Callback of bus_dmamap_load for the buffer associated
1208 * with data. Updates the cmd pkt (size/sgl_entries
1209 * fields, as applicable) to reflect the number of sg
1210 * elements.
1211 *
1212 * Input: arg -- ptr to OSL internal request context
1213 * segs -- ptr to a list of segment descriptors
1214 * nsegments--# of segments
1215 * error -- 0 if no errors encountered before callback,
1216 * non-zero if errors were encountered
1217 * Output: None
1218 * Return value: None
1219 */
1220 static TW_VOID
1221 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1222 TW_INT32 nsegments, TW_INT32 error)
1223 {
1224 struct tw_osli_req_context *req =
1225 (struct tw_osli_req_context *)arg;
1226 struct twa_softc *sc = req->ctlr;
1227 struct tw_cl_req_packet *req_pkt = &(req->req_pkt);
1228
1229 tw_osli_dbg_dprintf(10, sc, "entered");
1230
1231 /* Mark the request as currently being processed. */
1232 req->state = TW_OSLI_REQ_STATE_BUSY;
1233 /* Move the request into the busy queue. */
1234 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1235
1236 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1237 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1238 tw_osli_allow_new_requests(sc, (TW_VOID *)(req->orig_req));
1239
1240 if (error == EFBIG) {
1241 req->error_code = error;
1242 goto out;
1243 }
1244
1245 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1246 struct tw_cl_passthru_req_packet *pt_req;
1247
1248 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1249 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1250 BUS_DMASYNC_PREREAD);
1251
1252 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1253 /*
1254 * If we're using an alignment buffer, and we're
1255 * writing data, copy the real data out.
1256 */
1257 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1258 bcopy(req->real_data, req->data, req->real_length);
1259 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1260 BUS_DMASYNC_PREWRITE);
1261 }
1262
1263 pt_req = &(req_pkt->gen_req_pkt.pt_req);
1264 pt_req->sg_list = (TW_UINT8 *)segs;
1265 pt_req->sgl_entries += (nsegments - 1);
1266 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1267 &(req->req_handle));
1268 } else {
1269 struct tw_cl_scsi_req_packet *scsi_req;
1270
1271 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1272 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1273 BUS_DMASYNC_PREREAD);
1274
1275 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1276 /*
1277 * If we're using an alignment buffer, and we're
1278 * writing data, copy the real data out.
1279 */
1280 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1281 bcopy(req->real_data, req->data, req->real_length);
1282 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1283 BUS_DMASYNC_PREWRITE);
1284 }
1285
1286 scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1287 scsi_req->sg_list = (TW_UINT8 *)segs;
1288 scsi_req->sgl_entries += (nsegments - 1);
1289 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1290 &(req->req_handle));
1291 }
1292
1293 out:
1294 if (error) {
1295 req->error_code = error;
1296 req_pkt->tw_osl_callback(&(req->req_handle));
1297 /*
1298 * If the caller had been returned EINPROGRESS, and he has
1299 * registered a callback for handling completion, the callback
1300 * will never get called because we were unable to submit the
1301 * request. So, free up the request right here.
1302 */
1303 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1304 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1305 }
1306 }
1307
1308
1309
1310 /*
1311 * Function name: twa_map_load_callback
1312 * Description: Callback of bus_dmamap_load for the buffer associated
1313 * with a cmd pkt.
1314 *
1315 * Input: arg -- ptr to variable to hold phys addr
1316 * segs -- ptr to a list of segment descriptors
1317 * nsegments--# of segments
1318 * error -- 0 if no errors encountered before callback,
1319 * non-zero if errors were encountered
1320 * Output: None
1321 * Return value: None
1322 */
1323 static TW_VOID
1324 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1325 TW_INT32 nsegments, TW_INT32 error)
1326 {
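/* Single-segment load: record the segment's bus (physical) address. */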
1327 *((TW_UINT64 *)arg) = segs[0].ds_addr;
1328 }
1329
1330
1331
1332 /*
1333 * Function name: tw_osli_map_request
1334 * Description: Maps a cmd pkt and data associated with it, into
1335 * DMA'able memory.
1336 *
1337 * Input: req -- ptr to request pkt
1338 * Output: None
1339 * Return value: 0 -- success
1340 * non-zero-- failure
1341 */
1342 TW_INT32
1343 tw_osli_map_request(struct tw_osli_req_context *req)
1344 {
1345 struct twa_softc *sc = req->ctlr;
1346 TW_INT32 error = 0;
1347
1348 tw_osli_dbg_dprintf(10, sc, "entered");
1349
1350 /* If the command involves data, map that too. */
1351 if (req->data != NULL) {
1352 /*
1353 * It's sufficient for the data pointer to be 4-byte aligned
1354 * to work with 9000. However, if 4-byte aligned addresses
1355 * are passed to bus_dmamap_load, we can get back sg elements
1356 * that are not 512-byte multiples in size. So, we will let
1357 * only those buffers that are 512-byte aligned to pass
1358 * through, and bounce the rest, so as to make sure that we
1359 * always get back sg elements that are 512-byte multiples
1360 * in size.
1361 */
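/*
 * E.g., a buffer that starts on a 4-byte (but not 512-byte) boundary, or
 * whose length is not a multiple of 512, is bounced through the
 * size-rounded buffer allocated below.
 */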
1362 if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1363 (req->length % sc->sg_size_factor)) {
1364 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1365 /* Save original data pointer and length. */
1366 req->real_data = req->data;
1367 req->real_length = req->length;
1368 req->length = (req->length +
1369 (sc->sg_size_factor - 1)) &
1370 ~(sc->sg_size_factor - 1);
1371 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
1372 M_NOWAIT);
1373 if (req->data == NULL) {
1374 tw_osli_printf(sc, "error = %d",
1375 TW_CL_SEVERITY_ERROR_STRING,
1376 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1377 0x201E,
1378 "Failed to allocate memory "
1379 "for bounce buffer",
1380 ENOMEM);
1381 /* Restore original data pointer and length. */
1382 req->data = req->real_data;
1383 req->length = req->real_length;
1384 return(ENOMEM);
1385 }
1386 }
1387
1388 /*
1389 * Map the data buffer into bus space and build the SG list.
1390 */
1391 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1392 /* Lock against multiple simultaneous ioctl calls. */
1393 mtx_lock_spin(sc->io_lock);
1394 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1395 req->data, req->length,
1396 twa_map_load_data_callback, req,
1397 BUS_DMA_WAITOK);
1398 mtx_unlock_spin(sc->io_lock);
1399 } else {
1400 /*
1401 * There's only one CAM I/O thread running at a time.
1402 * So, there's no need to hold the io_lock.
1403 */
1404 error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1405 req->data, req->length,
1406 twa_map_load_data_callback, req,
1407 BUS_DMA_WAITOK);
1408 }
1409
1410 if (!error)
1411 error = req->error_code;
1412 else {
1413 if (error == EINPROGRESS) {
1414 /*
1415 * Specifying sc->io_lock as the lockfuncarg
1416 * in ...tag_create should protect the access
1417 * of ...FLAGS_MAPPED from the callback.
1418 */
1419 mtx_lock_spin(sc->io_lock);
1420 if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) {
1421 req->flags |=
1422 TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1423 tw_osli_disallow_new_requests(sc);
1424 }
1425 mtx_unlock_spin(sc->io_lock);
1426 error = 0;
1427 } else {
1428 /* Free alignment buffer if it was used. */
1429 if (req->flags &
1430 TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1431 free(req->data, TW_OSLI_MALLOC_CLASS);
1432 /*
1433 * Restore original data pointer
1434 * and length.
1435 */
1436 req->data = req->real_data;
1437 req->length = req->real_length;
1438 }
1439 }
1440 }
1441
1442 } else {
1443 /* Mark the request as currently being processed. */
1444 req->state = TW_OSLI_REQ_STATE_BUSY;
1445 /* Move the request into the busy queue. */
1446 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1447 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1448 error = tw_cl_fw_passthru(&sc->ctlr_handle,
1449 &(req->req_pkt), &(req->req_handle));
1450 else
1451 error = tw_cl_start_io(&sc->ctlr_handle,
1452 &(req->req_pkt), &(req->req_handle));
1453 if (error) {
1454 req->error_code = error;
1455 req->req_pkt.tw_osl_callback(&(req->req_handle));
1456 }
1457 }
1458 return(error);
1459 }
1460
1461
1462
1463 /*
1464 * Function name: tw_osli_unmap_request
1465 * Description: Undoes the mapping done by tw_osli_map_request.
1466 *
1467 * Input: req -- ptr to request pkt
1468 * Output: None
1469 * Return value: None
1470 */
1471 TW_VOID
1472 tw_osli_unmap_request(struct tw_osli_req_context *req)
1473 {
1474 struct twa_softc *sc = req->ctlr;
1475
1476 tw_osli_dbg_dprintf(10, sc, "entered");
1477
1478 /* If the command involved data, unmap that too. */
1479 if (req->data != NULL) {
1480 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1481 /* Lock against multiple simultaneous ioctl calls. */
1482 mtx_lock_spin(sc->io_lock);
1483
1484 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1485 bus_dmamap_sync(sc->ioctl_tag,
1486 sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1487
1488 /*
1489 * If we are using a bounce buffer, and we are
1490 * reading data, copy the real data in.
1491 */
1492 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1493 bcopy(req->data, req->real_data,
1494 req->real_length);
1495 }
1496
1497 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1498 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1499 BUS_DMASYNC_POSTWRITE);
1500
1501 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1502
1503 mtx_unlock_spin(sc->io_lock);
1504 } else {
1505 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1506 bus_dmamap_sync(sc->dma_tag,
1507 req->dma_map, BUS_DMASYNC_POSTREAD);
1508
1509 /*
1510 * If we are using a bounce buffer, and we are
1511 * reading data, copy the real data in.
1512 */
1513 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1514 bcopy(req->data, req->real_data,
1515 req->real_length);
1516 }
1517 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1518 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1519 BUS_DMASYNC_POSTWRITE);
1520
1521 bus_dmamap_unload(sc->dma_tag, req->dma_map);
1522 }
1523 }
1524
1525 /* Free alignment buffer if it was used. */
1526 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1527 free(req->data, TW_OSLI_MALLOC_CLASS);
1528 /* Restore original data pointer and length. */
1529 req->data = req->real_data;
1530 req->length = req->real_length;
1531 }
1532 }
1533
1534
1535
1536 #ifdef TW_OSL_DEBUG
1537
1538 TW_VOID twa_report_stats(TW_VOID);
1539 TW_VOID twa_reset_stats(TW_VOID);
1540 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1541 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1542
1543
1544 /*
1545 * Function name: twa_report_stats
1546 * Description: Meant to be called from ddb. Calls the functions that
1547 * print the OSL and CL internal stats for each controller.
1548 *
1549 * Input: None
1550 * Output: None
1551 * Return value: None
1552 */
1553 TW_VOID
1554 twa_report_stats(TW_VOID)
1555 {
1556 struct twa_softc *sc;
1557 TW_INT32 i;
1558
1559 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1560 tw_osli_print_ctlr_stats(sc);
1561 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1562 }
1563 }
1564
1565
1566
1567 /*
1568 * Function name: tw_osli_print_ctlr_stats
1569 * Description: Meant to be called from ddb. Prints the OSL controller stats.
1570 *
1571 * Input: sc -- ptr to OSL internal controller context
1572 * Output: None
1573 * Return value: None
1574 */
1575 TW_VOID
1576 tw_osli_print_ctlr_stats(struct twa_softc *sc)
1577 {
1578 twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1579 twa_printf(sc, "OSLq type current max\n");
1580 twa_printf(sc, "free %04d %04d\n",
1581 sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1582 sc->q_stats[TW_OSLI_FREE_Q].max_len);
1583 twa_printf(sc, "busy %04d %04d\n",
1584 sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1585 sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1586 }
1587
1588
1589
1590 /*
1591 * Function name: twa_print_req_info
1592 * Description: Meant to be called from ddb. Calls the functions that
1593 * OSL and CL internal details for the request.
1594 *
1595 * Input: req -- ptr to OSL internal request context
1596 * Output: None
1597 * Return value: None
1598 */
1599 TW_VOID
1600 twa_print_req_info(struct tw_osli_req_context *req)
1601 {
1602 struct twa_softc *sc = req->ctlr;
1603
1604 twa_printf(sc, "OSL details for request:\n");
1605 twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1606 "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1607 "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1608 "next_req = %p, prev_req = %p, dma_map = %p\n",
1609 req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1610 req->data, req->length, req->real_data, req->real_length,
1611 req->state, req->flags, req->error_code, req->orig_req,
1612 req->link.next, req->link.prev, req->dma_map);
1613 tw_cl_print_req_info(&(req->req_handle));
1614 }
1615
1616
1617
1618 /*
1619 * Function name: twa_reset_stats
1620 * Description: Meant to be called from ddb.
1621 * Resets some OSL controller stats.
1622 *
1623 * Input: None
1624 * Output: None
1625 * Return value: None
1626 */
1627 TW_VOID
1628 twa_reset_stats(TW_VOID)
1629 {
1630 struct twa_softc *sc;
1631 TW_INT32 i;
1632
1633 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1634 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1635 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1636 tw_cl_reset_stats(&sc->ctlr_handle);
1637 }
1638 }
1639
1640 #endif /* TW_OSL_DEBUG */