1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
5 * Copyright (c) 2004-05 Vinod Kashyap.
6 * Copyright (c) 2000 Michael Smith
7 * Copyright (c) 2000 BSDi
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 /*
 * AMCC's 3ware driver for 9000 series storage controllers.
37 *
38 * Author: Vinod Kashyap
39 * Modifications by: Adam Radford
40 * Modifications by: Manjunath Ranganathaiah
41 */
42
43
44 /*
 * FreeBSD-specific functions not related to CAM, and other
46 * miscellaneous functions.
47 */
48
49
50 #include <dev/twa/tw_osl_includes.h>
51 #include <dev/twa/tw_cl_fwif.h>
52 #include <dev/twa/tw_cl_ioctl.h>
53 #include <dev/twa/tw_osl_ioctl.h>
54
55 #ifdef TW_OSL_DEBUG
56 TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
57 TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
58 #endif /* TW_OSL_DEBUG */
59
60 static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
61
62
63 static d_open_t twa_open;
64 static d_close_t twa_close;
65 static d_ioctl_t twa_ioctl;
66
67 static struct cdevsw twa_cdevsw = {
68 .d_version = D_VERSION,
69 .d_open = twa_open,
70 .d_close = twa_close,
71 .d_ioctl = twa_ioctl,
72 .d_name = "twa",
73 };
74
75 static devclass_t twa_devclass;
76
77
78 /*
79 * Function name: twa_open
80 * Description: Called when the controller is opened.
81 * Simply marks the controller as open.
82 *
83 * Input: dev -- control device corresponding to the ctlr
84 * flags -- mode of open
85 * fmt -- device type (character/block etc.)
86 * proc -- current process
87 * Output: None
88 * Return value: 0 -- success
89 * non-zero-- failure
90 */
91 static TW_INT32
92 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
93 {
94 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
95
96 tw_osli_dbg_dprintf(5, sc, "entered");
97 sc->open = TW_CL_TRUE;
98 return(0);
99 }
100
101
102
103 /*
104 * Function name: twa_close
105 * Description: Called when the controller is closed.
106 * Simply marks the controller as not open.
107 *
108 * Input: dev -- control device corresponding to the ctlr
109 * flags -- mode of corresponding open
110 * fmt -- device type (character/block etc.)
111 * proc -- current process
112 * Output: None
113 * Return value: 0 -- success
114 * non-zero-- failure
115 */
116 static TW_INT32
117 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
118 {
119 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
120
121 tw_osli_dbg_dprintf(5, sc, "entered");
122 sc->open = TW_CL_FALSE;
123 return(0);
124 }
125
126
127
128 /*
129 * Function name: twa_ioctl
130 * Description: Called when an ioctl is posted to the controller.
131 * Handles any OS Layer specific cmds, passes the rest
132 * on to the Common Layer.
133 *
134 * Input: dev -- control device corresponding to the ctlr
135 * cmd -- ioctl cmd
136 * buf -- ptr to buffer in kernel memory, which is
137 * a copy of the input buffer in user-space
138 * flags -- mode of corresponding open
139 * proc -- current process
140 * Output: buf -- ptr to buffer in kernel memory, which will
141 * be copied to the output buffer in user-space
142 * Return value: 0 -- success
143 * non-zero-- failure
144 */
145 static TW_INT32
146 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
147 {
148 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
149 TW_INT32 error;
150
151 tw_osli_dbg_dprintf(5, sc, "entered");
152
153 switch (cmd) {
154 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
155 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
156 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
157 break;
158
159 case TW_OSL_IOCTL_SCAN_BUS:
160 /* Request CAM for a bus scan. */
161 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
162 error = tw_osli_request_bus_scan(sc);
163 break;
164
165 default:
166 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
167 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
168 break;
169 }
170 return(error);
171 }
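/*
 * Illustrative sketch only (not part of the driver): a userland tool could
 * exercise the bus-scan path above roughly as follows, assuming the ioctl
 * command encodings in <dev/twa/tw_osl_ioctl.h> are usable from userland and
 * the control device is /dev/twa0 (fd is a placeholder local):
 *
 *	int fd = open("/dev/twa0", O_RDWR);
 *	if (fd >= 0) {
 *		if (ioctl(fd, TW_OSL_IOCTL_SCAN_BUS) == -1)
 *			warn("bus scan ioctl failed");
 *		close(fd);
 *	}
 *
 * Any other command value is handed straight to the Common Layer via
 * tw_cl_ioctl() in the default case above.
 */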
172
173
174
175 static TW_INT32 twa_probe(device_t dev);
176 static TW_INT32 twa_attach(device_t dev);
177 static TW_INT32 twa_detach(device_t dev);
178 static TW_INT32 twa_shutdown(device_t dev);
179 static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
180 static TW_VOID twa_pci_intr(TW_VOID *arg);
181 static TW_VOID twa_watchdog(TW_VOID *arg);
182 int twa_setup_intr(struct twa_softc *sc);
183 int twa_teardown_intr(struct twa_softc *sc);
184
185 static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
186 static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
187
188 static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190 static TW_VOID twa_map_load_callback(TW_VOID *arg,
191 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
192
193
194 static device_method_t twa_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, twa_probe),
197 DEVMETHOD(device_attach, twa_attach),
198 DEVMETHOD(device_detach, twa_detach),
199 DEVMETHOD(device_shutdown, twa_shutdown),
200
201 DEVMETHOD_END
202 };
203
204 static driver_t twa_pci_driver = {
205 "twa",
206 twa_methods,
207 sizeof(struct twa_softc)
208 };
209
210 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
211 MODULE_DEPEND(twa, cam, 1, 1, 1);
212 MODULE_DEPEND(twa, pci, 1, 1, 1);
213
214
215 /*
216 * Function name: twa_probe
217 * Description: Called at driver load time. Claims 9000 ctlrs.
218 *
219 * Input: dev -- bus device corresponding to the ctlr
220 * Output: None
221 * Return value: <= 0 -- success
222 * > 0 -- failure
223 */
224 static TW_INT32
225 twa_probe(device_t dev)
226 {
227 static TW_UINT8 first_ctlr = 1;
228
229 tw_osli_dbg_printf(3, "entered");
230
231 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
232 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
233 /* Print the driver version only once. */
234 if (first_ctlr) {
235 printf("3ware device driver for 9000 series storage "
236 "controllers, version: %s\n",
237 TW_OSL_DRIVER_VERSION_STRING);
238 first_ctlr = 0;
239 }
240 return(0);
241 }
242 return(ENXIO);
243 }
244
245 int twa_setup_intr(struct twa_softc *sc)
246 {
247 int error = 0;
248
249 if (!(sc->intr_handle) && (sc->irq_res)) {
250 error = bus_setup_intr(sc->bus_dev, sc->irq_res,
251 INTR_TYPE_CAM | INTR_MPSAFE,
252 NULL, twa_pci_intr,
253 sc, &sc->intr_handle);
254 }
255 return( error );
256 }
257
258
259 int twa_teardown_intr(struct twa_softc *sc)
260 {
261 int error = 0;
262
263 if ((sc->intr_handle) && (sc->irq_res)) {
264 error = bus_teardown_intr(sc->bus_dev,
265 sc->irq_res, sc->intr_handle);
266 sc->intr_handle = NULL;
267 }
268 return( error );
269 }
270
271
272
273 /*
274 * Function name: twa_attach
275 * Description: Allocates pci resources; updates sc; adds a node to the
276 * sysctl tree to expose the driver version; makes calls
277 * (to the Common Layer) to initialize ctlr, and to
278 * attach to CAM.
279 *
280 * Input: dev -- bus device corresponding to the ctlr
281 * Output: None
282 * Return value: 0 -- success
283 * non-zero-- failure
284 */
285 static TW_INT32
286 twa_attach(device_t dev)
287 {
288 struct twa_softc *sc = device_get_softc(dev);
289 TW_INT32 bar_num;
290 TW_INT32 bar0_offset;
291 TW_INT32 bar_size;
292 TW_INT32 error;
293
294 tw_osli_dbg_dprintf(3, sc, "entered");
295
296 sc->ctlr_handle.osl_ctlr_ctxt = sc;
297
298 /* Initialize the softc structure. */
299 sc->bus_dev = dev;
300 sc->device_id = pci_get_device(dev);
301
302 /* Initialize the mutexes right here. */
303 sc->io_lock = &(sc->io_lock_handle);
304 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
305 sc->q_lock = &(sc->q_lock_handle);
306 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
307 sc->sim_lock = &(sc->sim_lock_handle);
308 mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
309
310 sysctl_ctx_init(&sc->sysctl_ctxt);
311 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
312 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
313 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
314 if (sc->sysctl_tree == NULL) {
315 tw_osli_printf(sc, "error = %d",
316 TW_CL_SEVERITY_ERROR_STRING,
317 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
318 0x2000,
319 "Cannot add sysctl tree node",
320 ENXIO);
321 return(ENXIO);
322 }
323 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
324 OID_AUTO, "driver_version", CTLFLAG_RD,
325 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
326
327 /* Force the busmaster enable bit on, in case the BIOS forgot. */
328 pci_enable_busmaster(dev);
329
330 /* Allocate the PCI register window. */
331 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
332 &bar_num, &bar0_offset, &bar_size))) {
333 tw_osli_printf(sc, "error = %d",
334 TW_CL_SEVERITY_ERROR_STRING,
335 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
336 0x201F,
337 "Can't get PCI BAR info",
338 error);
339 tw_osli_free_resources(sc);
340 return(error);
341 }
342 sc->reg_res_id = PCIR_BARS + bar0_offset;
343 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
344 &(sc->reg_res_id), RF_ACTIVE))
345 == NULL) {
346 tw_osli_printf(sc, "error = %d",
347 TW_CL_SEVERITY_ERROR_STRING,
348 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
349 0x2002,
350 "Can't allocate register window",
351 ENXIO);
352 tw_osli_free_resources(sc);
353 return(ENXIO);
354 }
355 sc->bus_tag = rman_get_bustag(sc->reg_res);
356 sc->bus_handle = rman_get_bushandle(sc->reg_res);
357
358 /* Allocate and register our interrupt. */
359 sc->irq_res_id = 0;
360 if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ,
361 &(sc->irq_res_id),
362 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
363 tw_osli_printf(sc, "error = %d",
364 TW_CL_SEVERITY_ERROR_STRING,
365 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
366 0x2003,
367 "Can't allocate interrupt",
368 ENXIO);
369 tw_osli_free_resources(sc);
370 return(ENXIO);
371 }
372 if ((error = twa_setup_intr(sc))) {
373 tw_osli_printf(sc, "error = %d",
374 TW_CL_SEVERITY_ERROR_STRING,
375 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
376 0x2004,
377 "Can't set up interrupt",
378 error);
379 tw_osli_free_resources(sc);
380 return(error);
381 }
382
383 if ((error = tw_osli_alloc_mem(sc))) {
384 tw_osli_printf(sc, "error = %d",
385 TW_CL_SEVERITY_ERROR_STRING,
386 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
387 0x2005,
388 "Memory allocation failure",
389 error);
390 tw_osli_free_resources(sc);
391 return(error);
392 }
393
394 /* Initialize the Common Layer for this controller. */
395 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
396 TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
397 sc->non_dma_mem, sc->dma_mem,
398 sc->dma_mem_phys
399 ))) {
400 tw_osli_printf(sc, "error = %d",
401 TW_CL_SEVERITY_ERROR_STRING,
402 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
403 0x2006,
404 "Failed to initialize Common Layer/controller",
405 error);
406 tw_osli_free_resources(sc);
407 return(error);
408 }
409
410 /* Create the control device. */
411 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
412 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
413 "twa%d", device_get_unit(sc->bus_dev));
414 sc->ctrl_dev->si_drv1 = sc;
415
416 if ((error = tw_osli_cam_attach(sc))) {
417 tw_osli_free_resources(sc);
418 tw_osli_printf(sc, "error = %d",
419 TW_CL_SEVERITY_ERROR_STRING,
420 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
421 0x2007,
422 "Failed to initialize CAM",
423 error);
424 return(error);
425 }
426
427 sc->watchdog_index = 0;
428 callout_init(&(sc->watchdog_callout[0]), 1);
429 callout_init(&(sc->watchdog_callout[1]), 1);
430 callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
431 gone_in_dev(dev, 14, "twa(4) removed");
432
433 return(0);
434 }
435
436
437 static TW_VOID
438 twa_watchdog(TW_VOID *arg)
439 {
440 struct tw_cl_ctlr_handle *ctlr_handle =
441 (struct tw_cl_ctlr_handle *)arg;
442 struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
443 int i;
444 int i_need_a_reset = 0;
445 int driver_is_active = 0;
446 int my_watchdog_was_pending = 1234;
447 TW_UINT64 current_time;
448 struct tw_osli_req_context *my_req;
449
450
451 //==============================================================================
452 current_time = (TW_UINT64) (tw_osl_get_local_time());
453
454 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
455 my_req = &(sc->req_ctx_buf[i]);
456
457 if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
458 (my_req->deadline) &&
459 (my_req->deadline < current_time)) {
460 tw_cl_set_reset_needed(ctlr_handle);
461 #ifdef TW_OSL_DEBUG
462 device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
463 #else /* TW_OSL_DEBUG */
464 device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
465 #endif /* TW_OSL_DEBUG */
466 break;
467 }
468 }
469 //==============================================================================
470
471 i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);
472
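/* Alternate between the two watchdog callout slots on each run. */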
473 i = (int) ((sc->watchdog_index++) & 1);
474
475 driver_is_active = tw_cl_is_active(ctlr_handle);
476
477 if (i_need_a_reset) {
478 #ifdef TW_OSL_DEBUG
479 device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
480 #endif /* TW_OSL_DEBUG */
481 my_watchdog_was_pending =
482 callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
483 tw_cl_reset_ctlr(ctlr_handle);
484 #ifdef TW_OSL_DEBUG
485 device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
486 #endif /* TW_OSL_DEBUG */
487 } else if (driver_is_active) {
488 my_watchdog_was_pending =
489 callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
490 }
491 #ifdef TW_OSL_DEBUG
492 if (i_need_a_reset || my_watchdog_was_pending)
493 device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
494 "driver_is_active = %d, my_watchdog_was_pending = %d\n",
495 i_need_a_reset, driver_is_active, my_watchdog_was_pending);
496 #endif /* TW_OSL_DEBUG */
497 }
498
499
500 /*
501 * Function name: tw_osli_alloc_mem
502 * Description: Allocates memory needed both by CL and OSL.
503 *
504 * Input: sc -- OSL internal controller context
505 * Output: None
506 * Return value: 0 -- success
507 * non-zero-- failure
508 */
509 static TW_INT32
510 tw_osli_alloc_mem(struct twa_softc *sc)
511 {
512 struct tw_osli_req_context *req;
513 TW_UINT32 max_sg_elements;
514 TW_UINT32 non_dma_mem_size;
515 TW_UINT32 dma_mem_size;
516 TW_INT32 error;
517 TW_INT32 i;
518
519 tw_osli_dbg_dprintf(3, sc, "entered");
520
521 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
522 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
523
524 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
525 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
526
527 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
528 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
529 &(sc->alignment), &(sc->sg_size_factor),
530 &non_dma_mem_size, &dma_mem_size
531 ))) {
532 tw_osli_printf(sc, "error = %d",
533 TW_CL_SEVERITY_ERROR_STRING,
534 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
535 0x2008,
536 "Can't get Common Layer's memory requirements",
537 error);
538 return(error);
539 }
540
541 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
542 M_WAITOK)) == NULL) {
543 tw_osli_printf(sc, "error = %d",
544 TW_CL_SEVERITY_ERROR_STRING,
545 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
546 0x2009,
547 "Can't allocate non-dma memory",
548 ENOMEM);
549 return(ENOMEM);
550 }
551
552 /* Create the parent dma tag. */
553 if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
554 sc->alignment, /* alignment */
555 TW_OSLI_DMA_BOUNDARY, /* boundary */
556 BUS_SPACE_MAXADDR, /* lowaddr */
557 BUS_SPACE_MAXADDR, /* highaddr */
558 NULL, NULL, /* filter, filterarg */
559 TW_CL_MAX_IO_SIZE, /* maxsize */
560 max_sg_elements, /* nsegments */
561 TW_CL_MAX_IO_SIZE, /* maxsegsize */
562 0, /* flags */
563 NULL, /* lockfunc */
564 NULL, /* lockfuncarg */
565 &sc->parent_tag /* tag */)) {
566 tw_osli_printf(sc, "error = %d",
567 TW_CL_SEVERITY_ERROR_STRING,
568 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
569 0x200A,
570 "Can't allocate parent DMA tag",
571 ENOMEM);
572 return(ENOMEM);
573 }
574
575 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
576 if (bus_dma_tag_create(sc->parent_tag, /* parent */
577 sc->alignment, /* alignment */
578 0, /* boundary */
579 BUS_SPACE_MAXADDR, /* lowaddr */
580 BUS_SPACE_MAXADDR, /* highaddr */
581 NULL, NULL, /* filter, filterarg */
582 dma_mem_size, /* maxsize */
583 1, /* nsegments */
584 BUS_SPACE_MAXSIZE, /* maxsegsize */
585 0, /* flags */
586 NULL, /* lockfunc */
587 NULL, /* lockfuncarg */
588 &sc->cmd_tag /* tag */)) {
589 tw_osli_printf(sc, "error = %d",
590 TW_CL_SEVERITY_ERROR_STRING,
591 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
592 0x200B,
593 "Can't allocate DMA tag for Common Layer's "
594 "DMA'able memory",
595 ENOMEM);
596 return(ENOMEM);
597 }
598
599 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
600 BUS_DMA_NOWAIT, &sc->cmd_map)) {
601 /* Try a second time. */
602 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
603 BUS_DMA_NOWAIT, &sc->cmd_map)) {
604 tw_osli_printf(sc, "error = %d",
605 TW_CL_SEVERITY_ERROR_STRING,
606 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
607 0x200C,
608 "Can't allocate DMA'able memory for the"
609 "Common Layer",
610 ENOMEM);
611 return(ENOMEM);
612 }
613 }
614
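/*
 * Load the Common Layer's DMA'able memory; twa_map_load_callback
 * records the resulting bus address in sc->dma_mem_phys.
 */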
615 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
616 dma_mem_size, twa_map_load_callback,
617 &sc->dma_mem_phys, 0);
618
619 /*
620 * Create a dma tag for data buffers; size will be the maximum
621 * possible I/O size (128kB).
622 */
623 if (bus_dma_tag_create(sc->parent_tag, /* parent */
624 sc->alignment, /* alignment */
625 0, /* boundary */
626 BUS_SPACE_MAXADDR, /* lowaddr */
627 BUS_SPACE_MAXADDR, /* highaddr */
628 NULL, NULL, /* filter, filterarg */
629 TW_CL_MAX_IO_SIZE, /* maxsize */
630 max_sg_elements, /* nsegments */
631 TW_CL_MAX_IO_SIZE, /* maxsegsize */
632 BUS_DMA_ALLOCNOW, /* flags */
633 twa_busdma_lock, /* lockfunc */
634 sc->io_lock, /* lockfuncarg */
635 &sc->dma_tag /* tag */)) {
636 tw_osli_printf(sc, "error = %d",
637 TW_CL_SEVERITY_ERROR_STRING,
638 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
639 0x200F,
640 "Can't allocate DMA tag for data buffers",
641 ENOMEM);
642 return(ENOMEM);
643 }
644
645 /*
646 * Create a dma tag for ioctl data buffers; size will be the maximum
647 * possible I/O size (128kB).
648 */
649 if (bus_dma_tag_create(sc->parent_tag, /* parent */
650 sc->alignment, /* alignment */
651 0, /* boundary */
652 BUS_SPACE_MAXADDR, /* lowaddr */
653 BUS_SPACE_MAXADDR, /* highaddr */
654 NULL, NULL, /* filter, filterarg */
655 TW_CL_MAX_IO_SIZE, /* maxsize */
656 max_sg_elements, /* nsegments */
657 TW_CL_MAX_IO_SIZE, /* maxsegsize */
658 BUS_DMA_ALLOCNOW, /* flags */
659 twa_busdma_lock, /* lockfunc */
660 sc->io_lock, /* lockfuncarg */
661 &sc->ioctl_tag /* tag */)) {
662 tw_osli_printf(sc, "error = %d",
663 TW_CL_SEVERITY_ERROR_STRING,
664 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
665 0x2010,
666 "Can't allocate DMA tag for ioctl data buffers",
667 ENOMEM);
668 return(ENOMEM);
669 }
670
671 /* Create just one map for all ioctl request data buffers. */
672 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
673 tw_osli_printf(sc, "error = %d",
674 TW_CL_SEVERITY_ERROR_STRING,
675 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
676 0x2011,
677 "Can't create ioctl map",
678 ENOMEM);
679 return(ENOMEM);
680 }
681
682
683 /* Initialize request queues. */
684 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
685 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
686
687 if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
688 malloc((sizeof(struct tw_osli_req_context) *
689 TW_OSLI_MAX_NUM_REQUESTS),
690 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
691 tw_osli_printf(sc, "error = %d",
692 TW_CL_SEVERITY_ERROR_STRING,
693 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
694 0x2012,
695 "Failed to allocate request packets",
696 ENOMEM);
697 return(ENOMEM);
698 }
699 bzero(sc->req_ctx_buf,
700 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);
701
702 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
703 req = &(sc->req_ctx_buf[i]);
704 req->ctlr = sc;
705 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
706 tw_osli_printf(sc, "request # = %d, error = %d",
707 TW_CL_SEVERITY_ERROR_STRING,
708 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
709 0x2013,
710 "Can't create dma map",
711 i, ENOMEM);
712 return(ENOMEM);
713 }
714
715 /* Initialize the ioctl wakeup/ timeout mutex */
716 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
717 mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);
718
719 /* Insert request into the free queue. */
720 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
721 }
722
723 return(0);
724 }
725
726
727
728 /*
729 * Function name: tw_osli_free_resources
730 * Description: Performs clean-up at the time of going down.
731 *
732 * Input: sc -- ptr to OSL internal ctlr context
733 * Output: None
734 * Return value: None
735 */
736 static TW_VOID
737 tw_osli_free_resources(struct twa_softc *sc)
738 {
739 struct tw_osli_req_context *req;
740 TW_INT32 error = 0;
741
742 tw_osli_dbg_dprintf(3, sc, "entered");
743
744 /* Detach from CAM */
745 tw_osli_cam_detach(sc);
746
747 if (sc->req_ctx_buf)
748 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
749 NULL) {
750 mtx_destroy(req->ioctl_wake_timeout_lock);
751
752 if ((error = bus_dmamap_destroy(sc->dma_tag,
753 req->dma_map)))
754 tw_osli_dbg_dprintf(1, sc,
755 "dmamap_destroy(dma) returned %d",
756 error);
757 }
758
759 if ((sc->ioctl_tag) && (sc->ioctl_map))
760 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
761 tw_osli_dbg_dprintf(1, sc,
762 "dmamap_destroy(ioctl) returned %d", error);
763
764 /* Free all memory allocated so far. */
765 if (sc->req_ctx_buf)
766 free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);
767
768 if (sc->non_dma_mem)
769 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
770
771 if (sc->dma_mem) {
772 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
773 bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
774 sc->cmd_map);
775 }
776 if (sc->cmd_tag)
777 if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
778 tw_osli_dbg_dprintf(1, sc,
779 "dma_tag_destroy(cmd) returned %d", error);
780
781 if (sc->dma_tag)
782 if ((error = bus_dma_tag_destroy(sc->dma_tag)))
783 tw_osli_dbg_dprintf(1, sc,
784 "dma_tag_destroy(dma) returned %d", error);
785
786 if (sc->ioctl_tag)
787 if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
788 tw_osli_dbg_dprintf(1, sc,
789 "dma_tag_destroy(ioctl) returned %d", error);
790
791 if (sc->parent_tag)
792 if ((error = bus_dma_tag_destroy(sc->parent_tag)))
793 tw_osli_dbg_dprintf(1, sc,
794 "dma_tag_destroy(parent) returned %d", error);
795
796
797 /* Disconnect the interrupt handler. */
798 if ((error = twa_teardown_intr(sc)))
799 tw_osli_dbg_dprintf(1, sc,
800 "teardown_intr returned %d", error);
801
802 if (sc->irq_res != NULL)
803 if ((error = bus_release_resource(sc->bus_dev,
804 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
805 tw_osli_dbg_dprintf(1, sc,
806 "release_resource(irq) returned %d", error);
807
808
809 /* Release the register window mapping. */
810 if (sc->reg_res != NULL)
811 if ((error = bus_release_resource(sc->bus_dev,
812 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
813 tw_osli_dbg_dprintf(1, sc,
814 "release_resource(io) returned %d", error);
815
816
817 /* Destroy the control device. */
818 if (sc->ctrl_dev != (struct cdev *)NULL)
819 destroy_dev(sc->ctrl_dev);
820
821 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
822 tw_osli_dbg_dprintf(1, sc,
823 "sysctl_ctx_free returned %d", error);
824
825 }
826
827
828
829 /*
830 * Function name: twa_detach
831 * Description: Called when the controller is being detached from
832 * the pci bus.
833 *
834 * Input: dev -- bus device corresponding to the ctlr
835 * Output: None
836 * Return value: 0 -- success
837 * non-zero-- failure
838 */
839 static TW_INT32
840 twa_detach(device_t dev)
841 {
842 struct twa_softc *sc = device_get_softc(dev);
843 TW_INT32 error;
844
845 tw_osli_dbg_dprintf(3, sc, "entered");
846
847 error = EBUSY;
848 if (sc->open) {
849 tw_osli_printf(sc, "error = %d",
850 TW_CL_SEVERITY_ERROR_STRING,
851 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
852 0x2014,
853 "Device open",
854 error);
855 goto out;
856 }
857
858 /* Shut the controller down. */
859 if ((error = twa_shutdown(dev)))
860 goto out;
861
862 /* Free all resources associated with this controller. */
863 tw_osli_free_resources(sc);
864 error = 0;
865
866 out:
867 return(error);
868 }
869
870
871
872 /*
873 * Function name: twa_shutdown
874 * Description: Called at unload/shutdown time. Lets the controller
875 * know that we are going down.
876 *
877 * Input: dev -- bus device corresponding to the ctlr
878 * Output: None
879 * Return value: 0 -- success
880 * non-zero-- failure
881 */
882 static TW_INT32
883 twa_shutdown(device_t dev)
884 {
885 struct twa_softc *sc = device_get_softc(dev);
886 TW_INT32 error = 0;
887
888 tw_osli_dbg_dprintf(3, sc, "entered");
889
890 /* Disconnect interrupts. */
891 error = twa_teardown_intr(sc);
892
893 /* Stop watchdog task. */
894 callout_drain(&(sc->watchdog_callout[0]));
895 callout_drain(&(sc->watchdog_callout[1]));
896
897 /* Disconnect from the controller. */
898 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
899 tw_osli_printf(sc, "error = %d",
900 TW_CL_SEVERITY_ERROR_STRING,
901 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
902 0x2015,
903 "Failed to shutdown Common Layer/controller",
904 error);
905 }
906 return(error);
907 }
908
909
910
911 /*
912 * Function name: twa_busdma_lock
913 * Description: Function to provide synchronization during busdma_swi.
914 *
915 * Input: lock_arg -- lock mutex sent as argument
916 * op -- operation (lock/unlock) expected of the function
917 * Output: None
918 * Return value: None
919 */
920 TW_VOID
921 twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
922 {
923 struct mtx *lock;
924
925 lock = (struct mtx *)lock_arg;
926 switch (op) {
927 case BUS_DMA_LOCK:
928 mtx_lock_spin(lock);
929 break;
930
931 case BUS_DMA_UNLOCK:
932 mtx_unlock_spin(lock);
933 break;
934
935 default:
936 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
937 }
938 }
939
940
941 /*
942 * Function name: twa_pci_intr
943 * Description: Interrupt handler. Wrapper for twa_interrupt.
944 *
945 * Input: arg -- ptr to OSL internal ctlr context
946 * Output: None
947 * Return value: None
948 */
949 static TW_VOID
950 twa_pci_intr(TW_VOID *arg)
951 {
952 struct twa_softc *sc = (struct twa_softc *)arg;
953
954 tw_osli_dbg_dprintf(10, sc, "entered");
955 tw_cl_interrupt(&(sc->ctlr_handle));
956 }
957
958
959 /*
960 * Function name: tw_osli_fw_passthru
961 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
962 *
963 * Input: sc -- ptr to OSL internal ctlr context
964 * buf -- ptr to ioctl pkt understood by CL
965 * Output: None
966 * Return value: 0 -- success
967 * non-zero-- failure
968 */
969 TW_INT32
970 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
971 {
972 struct tw_osli_req_context *req;
973 struct tw_osli_ioctl_no_data_buf *user_buf =
974 (struct tw_osli_ioctl_no_data_buf *)buf;
975 TW_TIME end_time;
976 TW_UINT32 timeout = 60;
977 TW_UINT32 data_buf_size_adjusted;
978 struct tw_cl_req_packet *req_pkt;
979 struct tw_cl_passthru_req_packet *pt_req;
980 TW_INT32 error;
981
982 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
983
984 if ((req = tw_osli_get_request(sc)) == NULL)
985 return(EBUSY);
986
987 req->req_handle.osl_req_ctxt = req;
988 req->orig_req = buf;
989 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
990
991 req_pkt = &(req->req_pkt);
992 req_pkt->status = 0;
993 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
994 /* Let the Common Layer retry the request on cmd queue full. */
995 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
996
997 pt_req = &(req_pkt->gen_req_pkt.pt_req);
998 /*
999 * Make sure that the data buffer sent to firmware is a
 * 512-byte multiple in size.
1001 */
1002 data_buf_size_adjusted =
1003 (user_buf->driver_pkt.buffer_length +
1004 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
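/*
 * The mask arithmetic above assumes sg_size_factor is a power of 2
 * (typically 512 for these controllers); e.g., with a factor of 512,
 * a 1000-byte user buffer is padded out to 1024 bytes here.
 */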
1005 if ((req->length = data_buf_size_adjusted)) {
1006 if ((req->data = malloc(data_buf_size_adjusted,
1007 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
1008 error = ENOMEM;
1009 tw_osli_printf(sc, "error = %d",
1010 TW_CL_SEVERITY_ERROR_STRING,
1011 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1012 0x2016,
1013 "Could not alloc mem for "
1014 "fw_passthru data_buf",
1015 error);
1016 goto fw_passthru_err;
1017 }
1018 /* Copy the payload. */
1019 if ((error = copyin((TW_VOID *)(user_buf->pdata),
1020 req->data,
1021 user_buf->driver_pkt.buffer_length)) != 0) {
1022 tw_osli_printf(sc, "error = %d",
1023 TW_CL_SEVERITY_ERROR_STRING,
1024 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1025 0x2017,
1026 "Could not copyin fw_passthru data_buf",
1027 error);
1028 goto fw_passthru_err;
1029 }
1030 pt_req->sgl_entries = 1; /* will be updated during mapping */
1031 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1032 TW_OSLI_REQ_FLAGS_DATA_OUT);
1033 } else
1034 pt_req->sgl_entries = 0; /* no payload */
1035
1036 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1037 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1038
1039 if ((error = tw_osli_map_request(req)))
1040 goto fw_passthru_err;
1041
1042 end_time = tw_osl_get_local_time() + timeout;
1043 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1044 mtx_lock(req->ioctl_wake_timeout_lock);
1045 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1046
1047 error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
1048 "twa_passthru", timeout*hz);
1049 mtx_unlock(req->ioctl_wake_timeout_lock);
1050
1051 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1052 error = 0;
1053 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1054
1055 if (! error) {
1056 if (((error = req->error_code)) ||
1057 ((error = (req->state !=
1058 TW_OSLI_REQ_STATE_COMPLETE))) ||
1059 ((error = req_pkt->status)))
1060 goto fw_passthru_err;
1061 break;
1062 }
1063
1064 if (req_pkt->status) {
1065 error = req_pkt->status;
1066 goto fw_passthru_err;
1067 }
1068
1069 if (error == EWOULDBLOCK) {
1070 /* Time out! */
1071 if ((!(req->error_code)) &&
1072 (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
1073 (!(req_pkt->status)) ) {
1074 #ifdef TW_OSL_DEBUG
1075 tw_osli_printf(sc, "request = %p",
1076 TW_CL_SEVERITY_ERROR_STRING,
1077 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1078 0x7777,
1079 "FALSE Passthru timeout!",
1080 req);
1081 #endif /* TW_OSL_DEBUG */
1082 error = 0; /* False error */
1083 break;
1084 }
1085 if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1086 #ifdef TW_OSL_DEBUG
1087 tw_osli_printf(sc, "request = %p",
1088 TW_CL_SEVERITY_ERROR_STRING,
1089 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1090 0x2018,
1091 "Passthru request timed out!",
1092 req);
1093 #else /* TW_OSL_DEBUG */
1094 device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1095 #endif /* TW_OSL_DEBUG */
1096 tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1097 }
1098
1099 error = 0;
1100 end_time = tw_osl_get_local_time() + timeout;
1101 continue;
1102 /*
1103 * Don't touch req after a reset. It (and any
1104 * associated data) will be
1105 * unmapped by the callback.
1106 */
1107 }
1108 /*
1109 * Either the request got completed, or we were woken up by a
1110 * signal. Calculate the new timeout, in case it was the latter.
1111 */
1112 timeout = (end_time - tw_osl_get_local_time());
1113 } /* End of while loop */
1114
1115 /* If there was a payload, copy it back. */
1116 if ((!error) && (req->length))
1117 if ((error = copyout(req->data, user_buf->pdata,
1118 user_buf->driver_pkt.buffer_length)))
1119 tw_osli_printf(sc, "error = %d",
1120 TW_CL_SEVERITY_ERROR_STRING,
1121 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1122 0x2019,
1123 "Could not copyout fw_passthru data_buf",
1124 error);
1125
1126 fw_passthru_err:
1127
1128 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1129 error = EBUSY;
1130
1131 user_buf->driver_pkt.os_status = error;
1132 /* Free resources. */
1133 if (req->data)
1134 free(req->data, TW_OSLI_MALLOC_CLASS);
1135 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1136 return(error);
1137 }
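/*
 * Illustrative sketch only (not compiled into the driver): from userland,
 * the passthru path above would typically be driven by filling in a
 * struct tw_osli_ioctl_no_data_buf (layout in tw_osl_ioctl.h) and issuing
 * the ioctl on the control device.  Only fields referenced in the function
 * above are used; fd, buf and buf_len are placeholder locals:
 *
 *	struct tw_osli_ioctl_no_data_buf io;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.driver_pkt.buffer_length = buf_len;	// size of payload, if any
 *	io.pdata = buf;				// userland payload buffer
 *	// ... fill io.cmd_pkt with the firmware command of interest ...
 *	if (ioctl(fd, TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH, &io) == 0 &&
 *	    io.driver_pkt.os_status == 0)
 *		// success; buf holds any data returned by the firmware
 */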
1138
1139
1140
1141 /*
1142 * Function name: tw_osl_complete_passthru
1143 * Description: Called to complete passthru requests.
1144 *
1145 * Input: req_handle -- ptr to request handle
1146 * Output: None
1147 * Return value: None
1148 */
1149 TW_VOID
1150 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1151 {
1152 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1153 struct tw_cl_req_packet *req_pkt =
1154 (struct tw_cl_req_packet *)(&req->req_pkt);
1155 struct twa_softc *sc = req->ctlr;
1156
1157 tw_osli_dbg_dprintf(5, sc, "entered");
1158
1159 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1160 tw_osli_printf(sc, "request = %p, status = %d",
1161 TW_CL_SEVERITY_ERROR_STRING,
1162 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1163 0x201B,
1164 "Unposted command completed!!",
1165 req, req->state);
1166 }
1167
1168 /*
1169 * Remove request from the busy queue. Just mark it complete.
1170 * There's no need to move it into the complete queue as we are
1171 * going to be done with it right now.
1172 */
1173 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1174 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1175
1176 tw_osli_unmap_request(req);
1177
1178 /*
1179 * Don't do a wake up if there was an error even before the request
1180 * was sent down to the Common Layer, and we hadn't gotten an
1181 * EINPROGRESS. The request originator will then be returned an
 * error and can do the clean-up itself.
1183 */
1184 if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1185 return;
1186
1187 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1188 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1189 /* Wake up the sleeping command originator. */
1190 tw_osli_dbg_dprintf(5, sc,
1191 "Waking up originator of request %p", req);
1192 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1193 wakeup_one(req);
1194 } else {
1195 /*
1196 * If the request completed even before mtx_sleep
1197 * was called, simply return.
1198 */
1199 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1200 return;
1201
1202 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1203 return;
1204
1205 tw_osli_printf(sc, "request = %p",
1206 TW_CL_SEVERITY_ERROR_STRING,
1207 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1208 0x201C,
1209 "Passthru callback called, "
1210 "and caller not sleeping",
1211 req);
1212 }
1213 } else {
1214 tw_osli_printf(sc, "request = %p",
1215 TW_CL_SEVERITY_ERROR_STRING,
1216 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1217 0x201D,
1218 "Passthru callback called for non-passthru request",
1219 req);
1220 }
1221 }
1222
1223
1224
1225 /*
1226 * Function name: tw_osli_get_request
1227 * Description: Gets a request pkt from the free queue.
1228 *
1229 * Input: sc -- ptr to OSL internal ctlr context
1230 * Output: None
1231 * Return value: ptr to request pkt -- success
1232 * NULL -- failure
1233 */
1234 struct tw_osli_req_context *
1235 tw_osli_get_request(struct twa_softc *sc)
1236 {
1237 struct tw_osli_req_context *req;
1238
1239 tw_osli_dbg_dprintf(4, sc, "entered");
1240
1241 /* Get a free request packet. */
1242 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1243
1244 /* Initialize some fields to their defaults. */
1245 if (req) {
1246 req->req_handle.osl_req_ctxt = NULL;
1247 req->req_handle.cl_req_ctxt = NULL;
1248 req->req_handle.is_io = 0;
1249 req->data = NULL;
1250 req->length = 0;
1251 req->deadline = 0;
1252 req->real_data = NULL;
1253 req->real_length = 0;
1254 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1255 req->flags = 0;
1256 req->error_code = 0;
1257 req->orig_req = NULL;
1258
1259 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1260
1261 }
1262 return(req);
1263 }
1264
1265
1266
1267 /*
1268 * Function name: twa_map_load_data_callback
1269 * Description: Callback of bus_dmamap_load for the buffer associated
1270 * with data. Updates the cmd pkt (size/sgl_entries
1271 * fields, as applicable) to reflect the number of sg
1272 * elements.
1273 *
1274 * Input: arg -- ptr to OSL internal request context
1275 * segs -- ptr to a list of segment descriptors
1276 * nsegments--# of segments
1277 * error -- 0 if no errors encountered before callback,
1278 * non-zero if errors were encountered
1279 * Output: None
1280 * Return value: None
1281 */
1282 static TW_VOID
1283 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1284 TW_INT32 nsegments, TW_INT32 error)
1285 {
1286 struct tw_osli_req_context *req =
1287 (struct tw_osli_req_context *)arg;
1288 struct twa_softc *sc = req->ctlr;
1289 struct tw_cl_req_packet *req_pkt = &(req->req_pkt);
1290
1291 tw_osli_dbg_dprintf(10, sc, "entered");
1292
1293 if (error == EINVAL) {
1294 req->error_code = error;
1295 return;
1296 }
1297
1298 /* Mark the request as currently being processed. */
1299 req->state = TW_OSLI_REQ_STATE_BUSY;
1300 /* Move the request into the busy queue. */
1301 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1302
1303 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1304
1305 if (error == EFBIG) {
1306 req->error_code = error;
1307 goto out;
1308 }
1309
1310 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1311 struct tw_cl_passthru_req_packet *pt_req;
1312
1313 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1314 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1315 BUS_DMASYNC_PREREAD);
1316
1317 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1318 /*
1319 * If we're using an alignment buffer, and we're
1320 * writing data, copy the real data out.
1321 */
1322 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1323 bcopy(req->real_data, req->data, req->real_length);
1324 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1325 BUS_DMASYNC_PREWRITE);
1326 }
1327
1328 pt_req = &(req_pkt->gen_req_pkt.pt_req);
1329 pt_req->sg_list = (TW_UINT8 *)segs;
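/* sgl_entries was preset to 1 before mapping; adjust to the real count. */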
1330 pt_req->sgl_entries += (nsegments - 1);
1331 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1332 &(req->req_handle));
1333 } else {
1334 struct tw_cl_scsi_req_packet *scsi_req;
1335
1336 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1337 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1338 BUS_DMASYNC_PREREAD);
1339
1340 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1341 /*
1342 * If we're using an alignment buffer, and we're
1343 * writing data, copy the real data out.
1344 */
1345 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1346 bcopy(req->real_data, req->data, req->real_length);
1347 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1348 BUS_DMASYNC_PREWRITE);
1349 }
1350
1351 scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1352 scsi_req->sg_list = (TW_UINT8 *)segs;
1353 scsi_req->sgl_entries += (nsegments - 1);
1354 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1355 &(req->req_handle));
1356 }
1357
1358 out:
1359 if (error) {
1360 req->error_code = error;
1361 req_pkt->tw_osl_callback(&(req->req_handle));
1362 /*
1363 * If the caller had been returned EINPROGRESS, and he has
1364 * registered a callback for handling completion, the callback
1365 * will never get called because we were unable to submit the
1366 * request. So, free up the request right here.
1367 */
1368 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1369 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1370 }
1371 }
1372
1373
1374
1375 /*
1376 * Function name: twa_map_load_callback
1377 * Description: Callback of bus_dmamap_load for the buffer associated
1378 * with a cmd pkt.
1379 *
1380 * Input: arg -- ptr to variable to hold phys addr
1381 * segs -- ptr to a list of segment descriptors
1382 * nsegments--# of segments
1383 * error -- 0 if no errors encountered before callback,
1384 * non-zero if errors were encountered
1385 * Output: None
1386 * Return value: None
1387 */
1388 static TW_VOID
1389 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1390 TW_INT32 nsegments, TW_INT32 error)
1391 {
1392 *((bus_addr_t *)arg) = segs[0].ds_addr;
1393 }
1394
1395
1396
1397 /*
1398 * Function name: tw_osli_map_request
1399 * Description: Maps a cmd pkt and data associated with it, into
1400 * DMA'able memory.
1401 *
1402 * Input: req -- ptr to request pkt
1403 * Output: None
1404 * Return value: 0 -- success
1405 * non-zero-- failure
1406 */
1407 TW_INT32
1408 tw_osli_map_request(struct tw_osli_req_context *req)
1409 {
1410 struct twa_softc *sc = req->ctlr;
1411 TW_INT32 error = 0;
1412
1413 tw_osli_dbg_dprintf(10, sc, "entered");
1414
1415 /* If the command involves data, map that too. */
1416 if (req->data != NULL) {
1417 /*
1418 * It's sufficient for the data pointer to be 4-byte aligned
1419 * to work with 9000. However, if 4-byte aligned addresses
1420 * are passed to bus_dmamap_load, we can get back sg elements
1421 * that are not 512-byte multiples in size. So, we will let
1422 * only those buffers that are 512-byte aligned to pass
1423 * through, and bounce the rest, so as to make sure that we
1424 * always get back sg elements that are 512-byte multiples
1425 * in size.
1426 */
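/*
 * For example, with sg_size_factor = 512, a 4096-byte buffer starting at
 * a 512-byte aligned address passes straight through, while a buffer that
 * is only 4-byte aligned, or whose length is not a multiple of 512, is
 * copied through the bounce buffer allocated below.
 */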
1427 if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1428 (req->length % sc->sg_size_factor)) {
1429 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1430 /* Save original data pointer and length. */
1431 req->real_data = req->data;
1432 req->real_length = req->length;
1433 req->length = (req->length +
1434 (sc->sg_size_factor - 1)) &
1435 ~(sc->sg_size_factor - 1);
1436 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
1437 M_NOWAIT);
1438 if (req->data == NULL) {
1439 tw_osli_printf(sc, "error = %d",
1440 TW_CL_SEVERITY_ERROR_STRING,
1441 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1442 0x201E,
1443 "Failed to allocate memory "
1444 "for bounce buffer",
1445 ENOMEM);
1446 /* Restore original data pointer and length. */
1447 req->data = req->real_data;
1448 req->length = req->real_length;
1449 return(ENOMEM);
1450 }
1451 }
1452
1453 /*
1454 * Map the data buffer into bus space and build the SG list.
1455 */
1456 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1457 /* Lock against multiple simultaneous ioctl calls. */
1458 mtx_lock_spin(sc->io_lock);
1459 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1460 req->data, req->length,
1461 twa_map_load_data_callback, req,
1462 BUS_DMA_WAITOK);
1463 mtx_unlock_spin(sc->io_lock);
1464 } else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
1465 error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
1466 req->orig_req, twa_map_load_data_callback, req,
1467 BUS_DMA_WAITOK);
1468 } else {
1469 /*
1470 * There's only one CAM I/O thread running at a time.
1471 * So, there's no need to hold the io_lock.
1472 */
1473 error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1474 req->data, req->length,
1475 twa_map_load_data_callback, req,
1476 BUS_DMA_WAITOK);
1477 }
1478
1479 if (!error)
1480 error = req->error_code;
1481 else {
1482 if (error == EINPROGRESS) {
1483 /*
1484 * Specifying sc->io_lock as the lockfuncarg
1485 * in ...tag_create should protect the access
1486 * of ...FLAGS_MAPPED from the callback.
1487 */
1488 mtx_lock_spin(sc->io_lock);
1489 if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
1490 req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1491 tw_osli_disallow_new_requests(sc, &(req->req_handle));
1492 mtx_unlock_spin(sc->io_lock);
1493 error = 0;
1494 } else {
1495 tw_osli_printf(sc, "error = %d",
1496 TW_CL_SEVERITY_ERROR_STRING,
1497 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1498 0x9999,
1499 "Failed to map DMA memory "
1500 "for I/O request",
1501 error);
1502 req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
1503 /* Free alignment buffer if it was used. */
1504 if (req->flags &
1505 TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1506 free(req->data, TW_OSLI_MALLOC_CLASS);
1507 /*
1508 * Restore original data pointer
1509 * and length.
1510 */
1511 req->data = req->real_data;
1512 req->length = req->real_length;
1513 }
1514 }
1515 }
1516
1517 } else {
1518 /* Mark the request as currently being processed. */
1519 req->state = TW_OSLI_REQ_STATE_BUSY;
1520 /* Move the request into the busy queue. */
1521 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1522 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1523 error = tw_cl_fw_passthru(&sc->ctlr_handle,
1524 &(req->req_pkt), &(req->req_handle));
1525 else
1526 error = tw_cl_start_io(&sc->ctlr_handle,
1527 &(req->req_pkt), &(req->req_handle));
1528 if (error) {
1529 req->error_code = error;
1530 req->req_pkt.tw_osl_callback(&(req->req_handle));
1531 }
1532 }
1533 return(error);
1534 }
1535
1536
1537
1538 /*
1539 * Function name: tw_osli_unmap_request
1540 * Description: Undoes the mapping done by tw_osli_map_request.
1541 *
1542 * Input: req -- ptr to request pkt
1543 * Output: None
1544 * Return value: None
1545 */
1546 TW_VOID
1547 tw_osli_unmap_request(struct tw_osli_req_context *req)
1548 {
1549 struct twa_softc *sc = req->ctlr;
1550
1551 tw_osli_dbg_dprintf(10, sc, "entered");
1552
1553 /* If the command involved data, unmap that too. */
1554 if (req->data != NULL) {
1555 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1556 /* Lock against multiple simultaneous ioctl calls. */
1557 mtx_lock_spin(sc->io_lock);
1558
1559 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1560 bus_dmamap_sync(sc->ioctl_tag,
1561 sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1562
1563 /*
1564 * If we are using a bounce buffer, and we are
1565 * reading data, copy the real data in.
1566 */
1567 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1568 bcopy(req->data, req->real_data,
1569 req->real_length);
1570 }
1571
1572 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1573 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1574 BUS_DMASYNC_POSTWRITE);
1575
1576 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1577
1578 mtx_unlock_spin(sc->io_lock);
1579 } else {
1580 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1581 bus_dmamap_sync(sc->dma_tag,
1582 req->dma_map, BUS_DMASYNC_POSTREAD);
1583
1584 /*
1585 * If we are using a bounce buffer, and we are
1586 * reading data, copy the real data in.
1587 */
1588 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1589 bcopy(req->data, req->real_data,
1590 req->real_length);
1591 }
1592 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1593 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1594 BUS_DMASYNC_POSTWRITE);
1595
1596 bus_dmamap_unload(sc->dma_tag, req->dma_map);
1597 }
1598 }
1599
1600 /* Free alignment buffer if it was used. */
1601 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1602 free(req->data, TW_OSLI_MALLOC_CLASS);
1603 /* Restore original data pointer and length. */
1604 req->data = req->real_data;
1605 req->length = req->real_length;
1606 }
1607 }
1608
1609
1610
1611 #ifdef TW_OSL_DEBUG
1612
1613 TW_VOID twa_report_stats(TW_VOID);
1614 TW_VOID twa_reset_stats(TW_VOID);
1615 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1616 TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1617
1618
1619 /*
1620 * Function name: twa_report_stats
 * Description:	Meant to be called from ddb.  Calls functions that print
1622 * OSL and CL internal stats for the controller.
1623 *
1624 * Input: None
1625 * Output: None
1626 * Return value: None
1627 */
1628 TW_VOID
1629 twa_report_stats(TW_VOID)
1630 {
1631 struct twa_softc *sc;
1632 TW_INT32 i;
1633
1634 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1635 tw_osli_print_ctlr_stats(sc);
1636 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1637 }
1638 }
1639
1640
1641
1642 /*
1643 * Function name: tw_osli_print_ctlr_stats
 * Description:	Meant to be called from ddb.  Prints OSL controller stats.
1645 *
1646 * Input: sc -- ptr to OSL internal controller context
1647 * Output: None
1648 * Return value: None
1649 */
1650 TW_VOID
1651 tw_osli_print_ctlr_stats(struct twa_softc *sc)
1652 {
1653 twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1654 twa_printf(sc, "OSLq type current max\n");
1655 twa_printf(sc, "free %04d %04d\n",
1656 sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1657 sc->q_stats[TW_OSLI_FREE_Q].max_len);
1658 twa_printf(sc, "busy %04d %04d\n",
1659 sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1660 sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1661 }
1662
1663
1664
1665 /*
1666 * Function name: twa_print_req_info
 * Description:	Meant to be called from ddb.  Calls functions that print
1668 * OSL and CL internal details for the request.
1669 *
1670 * Input: req -- ptr to OSL internal request context
1671 * Output: None
1672 * Return value: None
1673 */
1674 TW_VOID
1675 twa_print_req_info(struct tw_osli_req_context *req)
1676 {
1677 struct twa_softc *sc = req->ctlr;
1678
1679 twa_printf(sc, "OSL details for request:\n");
1680 twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1681 "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1682 "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1683 "next_req = %p, prev_req = %p, dma_map = %p\n",
1684 req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1685 req->data, req->length, req->real_data, req->real_length,
1686 req->state, req->flags, req->error_code, req->orig_req,
1687 req->link.next, req->link.prev, req->dma_map);
1688 tw_cl_print_req_info(&(req->req_handle));
1689 }
1690
1691
1692
1693 /*
1694 * Function name: twa_reset_stats
 * Description:	Meant to be called from ddb.
1696 * Resets some OSL controller stats.
1697 *
1698 * Input: None
1699 * Output: None
1700 * Return value: None
1701 */
1702 TW_VOID
1703 twa_reset_stats(TW_VOID)
1704 {
1705 struct twa_softc *sc;
1706 TW_INT32 i;
1707
1708 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1709 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1710 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1711 tw_cl_reset_stats(&sc->ctlr_handle);
1712 }
1713 }
1714
1715 #endif /* TW_OSL_DEBUG */