/*
 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/6.0/sys/dev/twa/tw_cl_io.c 144966 2005-04-12 22:07:11Z vkashyap $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"


/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle -- controller handle
 *			req_pkt -- OSL built request packet
 *			req_handle -- request handle
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	struct tw_cl_command_9k		*cmd;
	struct tw_cl_scsi_req_packet	*scsi_req;
	TW_INT32			error;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"I/O during reset: returning busy. Ctlr state = 0x%x",
			ctlr->state);
		return(TW_OSL_EBUSY);
	}

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, req_pkt
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	req->cmd_pkt = req_pkt->dma_mem;
	req->cmd_pkt_phys = req_pkt->dma_mem_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16;	/* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
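
/*
 * Illustrative sketch (kept under #if 0, never compiled): one way an OS
 * Layer might package a READ CAPACITY request for tw_cl_start_io() above.
 * The tw_cl_* structures and the return convention are the ones used in
 * this file; my_osl_complete(), the static CDB/SG buffers, and the
 * per-request DMA setup (elided here) are hypothetical stand-ins for
 * whatever the OSL actually provides.
 */
#if 0
static TW_VOID	my_osl_complete(struct tw_cl_req_handle *req_handle);

static TW_INT32
my_osl_read_capacity(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_handle *req_handle, struct tw_cl_req_packet *req_pkt,
	TW_UINT32 buf_phys)
{
	struct tw_cl_scsi_req_packet	*scsi_req;
	static TW_UINT8			cdb[10] = { 0x25 }; /* READ CAPACITY(10) */
	static struct tw_cl_sg_desc32	sg[1];

	tw_osl_memzero(req_pkt, sizeof(struct tw_cl_req_packet));
	req_pkt->tw_osl_callback = my_osl_complete;	/* run on completion */

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
	scsi_req->unit = 0;
	scsi_req->lun = 0; /* non-zero luns need working_srl >= TWA_MULTI_LUN_FW_SRL */
	scsi_req->cdb = cdb;	/* assumes the OSL hands in a CDB pointer */
	scsi_req->cdb_len = 10;

	sg[0].address = buf_phys;	/* READ CAPACITY returns 8 bytes */
	sg[0].length = 8;
	scsi_req->sg_list = sg;
	scsi_req->sgl_entries = 1;

	/* 0 = accepted; TW_OSL_EBUSY = queue full or reset in progress. */
	return(tw_cl_start_io(ctlr_handle, req_pkt, req_handle));
}
#endif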


/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req -- ptr to CL internal request context
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_UINT32			status_reg;
	TW_INT32			error;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr->ctlr_handle, ctlr->io_lock);

	/* Check to see if we can post a command. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
	if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
		goto out;

	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
			|| ((req_pkt) &&
			(req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
			) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr->ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				error = 0;
			} else
				error = TW_OSL_EBUSY;
		} else
			error = TW_OSL_EBUSY;

		/* Unmask command interrupt. */
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else {
		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/*
		 * The controller cmd queue is not full.  Mark the request as
		 * currently being processed by the firmware, and move it into
		 * the busy queue.  Then submit the cmd.
		 */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);
		TW_CLI_WRITE_COMMAND_QUEUE(ctlr->ctlr_handle,
			req->cmd_pkt_phys +
			sizeof(struct tw_cl_command_header));
	}

out:
	tw_osl_free_lock(ctlr->ctlr_handle, ctlr->io_lock);
	return(error);
}
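
/*
 * Illustrative note (kept under #if 0, never compiled): the address posted
 * to the command queue register above skips the command header, because
 * header and command sit back to back in one DMA allocation.  A minimal
 * sketch of that layout; MY_POSTED_ADDR() is hypothetical.
 */
#if 0
/*
 *	cmd_pkt_phys                          --> struct tw_cl_command_header
 *	cmd_pkt_phys + sizeof(command header) --> 7K/9K command: this is the
 *	                                          address the firmware fetches
 */
#define	MY_POSTED_ADDR(cmd_pkt_phys)	\
	((cmd_pkt_phys) + sizeof(struct tw_cl_command_header))
#endif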


/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle -- controller handle
 *			req_pkt -- OSL built request packet
 *			req_handle -- request handle
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Passthru request during reset: returning busy. "
			"Ctlr state = 0x%x",
			ctlr->state);
		return(TW_OSL_EBUSY);
	}

	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, req_pkt
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	req->cmd_pkt = req_pkt->dma_mem;
	req->cmd_pkt_phys = req_pkt->dma_mem_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/*
	 * Build the cmd pkt.  The opcode is in the first byte of the command
	 * proper, which begins right after the command header.
	 */
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
		== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
			/*
			 * Grow the cmd size (in 32-bit words): each SG
			 * descriptor is 3 words (64-bit addr + length) or
			 * 2 words (32-bit addr + length).
			 */
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}


/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler for CL-supported ioctl cmds.
 *
 * Input:		ctlr_handle -- controller handle
 *			cmd -- ioctl cmd
 *			buf -- ptr to buffer in kernel memory, which is
 *			       a copy of the input buffer in user-space
 * Output:		buf -- ptr to buffer in kernel memory, which will
 *			       need to be copied to the output buffer in
 *			       user-space
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (! (ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.
			 * So, we have to report TW_CL_ERROR_AEN_NO_EVENTS.
			 * If we also encountered an overflow condition above,
			 * we cannot report both conditions during this call.
			 * We choose to report NO_EVENTS this time, and an
			 * overflow the next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (! (ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.
			 * So, we have to report TW_CL_ERROR_AEN_NO_EVENTS.
			 * If we also encountered an overflow condition above,
			 * we cannot report both conditions during this call.
			 * We choose to report NO_EVENTS this time, and an
			 * overflow the next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}


	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;


	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown ioctl cmd. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%x", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}
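
/*
 * Illustrative sketch (kept under #if 0, never compiled): the AEN queue
 * handled above is a ring of max_aens_supported entries, and GET_NEXT/
 * GET_PREVIOUS locate an event relative to the sequence id the caller
 * passes in.  my_aen_index() below just restates that index arithmetic.
 */
#if 0
static TW_INT32
my_aen_index(struct tw_cli_ctlr_context *ctlr, TW_INT32 start_index,
	TW_UINT32 caller_seq_id, TW_INT32 direction /* +1 next, -1 previous */)
{
	/*
	 * start_index is aen_head once the ring has wrapped (the oldest
	 * entry), and aen_tail (0) before that.  The wanted event sits at
	 * the distance between the caller's sequence id and the sequence id
	 * at start_index, stepped one entry in the requested direction.
	 */
	return((start_index + caller_seq_id -
		ctlr->aen_queue[start_index].sequence_id + direction) %
		ctlr->max_aens_supported);
}
#endif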


/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			table_id -- parameter table #
 *			param_id -- index of the parameter in the table
 *			param_size -- size of the parameter in bytes
 *			callback -- ptr to function, if any, to be called
 *				back on completion; TW_CL_NULL if no callback.
 * Output:		param_data -- param value
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/*
	 * Initialize memory to read data into.  (The - 1 accounts for the
	 * 1-byte data array at the end of struct tw_cl_param_9k.)
	 */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3; /* cmd + 1 SG desc, in 32-bit words */
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2; /* cmd + 1 SG desc, in 32-bit words */
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}
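
/*
 * Illustrative sketch (kept under #if 0, never compiled): a typical
 * synchronous (no-callback) call of tw_cli_get_param() above.  The ids
 * MY_PARAM_TABLE/MY_PARAM_ID are hypothetical; real ones live in
 * tw_cl_fwif.h.
 */
#if 0
	TW_UINT16	value;

	/*
	 * With a TW_CL_NULL callback, tw_cli_get_param() polls for
	 * completion, and on timeout does its own clean-up.
	 */
	if (tw_cli_get_param(ctlr, MY_PARAM_TABLE, MY_PARAM_ID,
		&value, sizeof(value), TW_CL_NULL))
		return(1);	/* same failure convention as get_param itself */
#endif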


/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			table_id -- parameter table #
 *			param_id -- index of the parameter in the table
 *			param_size -- size of the parameter in bytes
 *			data -- ptr to the parameter value
 *			callback -- ptr to function, if any, to be called
 *				back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize the buffer the data will be sent from. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3; /* cmd + 1 SG desc, in 32-bit words */
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2; /* cmd + 1 SG desc, in 32-bit words */
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
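
/*
 * Illustrative sketch (kept under #if 0, never compiled): the asynchronous
 * flavor of tw_cli_set_param() above.  MY_PARAM_TABLE, MY_PARAM_ID, and
 * my_set_param_done() are hypothetical; with a non-NULL callback the
 * routine returns right after submission and the callback runs from
 * response processing.
 */
#if 0
static TW_VOID	my_set_param_done(struct tw_cli_req_context *req);

	TW_UINT16	new_value = 1; /* in whatever byte order the firmware expects */

	error = tw_cli_set_param(ctlr, MY_PARAM_TABLE, MY_PARAM_ID,
		sizeof(new_value), &new_value, my_set_param_done);
#endif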


/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req -- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * NOT to clean up when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state != TW_CLI_REQ_STATE_PENDING)
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
	else {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against
		 * tw_cli_drain_pending_queue taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}
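
/*
 * Illustrative note (kept under #if 0, never compiled): the timeout
 * contract above is easy to get wrong, so this is the caller-side pattern
 * the file itself follows (see tw_cli_get_param()/tw_cli_set_param()):
 */
#if 0
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error == TW_OSL_ETIMEDOUT)
		return(error);	/* do NOT free req: the timeout path already
				   did, or the reset path now owns it */
	if (error)
		goto out;	/* any other failure: the caller frees req */
#endif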


/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle -- controller handle
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_INT32			reset_attempt = 1;
	TW_INT32			error;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
		"Draining all queues following reset");
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);

	tw_cli_disable_interrupts(ctlr);

	/* Soft reset the controller. */
try_reset:
	if ((error = tw_cli_soft_reset(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller reset failed",
			"error = %d; attempt %d", error, reset_attempt++);
		if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
			goto try_reset;
		else
			goto out;
	}

	/* Re-establish logical connection with the controller. */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
			TW_CL_NULL, TW_CL_NULL))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't initialize connection after reset",
			"error = %d", error);
		goto out;
	}

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Controller reset done!",
		" ");

out:
	ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
	/*
	 * Enable interrupts, and also clear attention and response interrupts.
	 */
	tw_cli_enable_interrupts(ctlr);

	/* Request a bus re-scan. */
	if (!error)
		tw_osl_scan_bus(ctlr_handle);
	return(error);
}


/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);

	if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
		(error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller errors detected after reset",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a SCSI cmd to the firmware.
 *
 * Input:		req -- ptr to request pkt
 *			cmd -- opcode of SCSI cmd to send
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
		return(TW_OSL_EBUSY);
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16;	/* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

	cmd9k->cdb[0] = (TW_UINT8)cmd;
	cmd9k->cdb[4] = 128;	/* allocation length */

	if ((error = tw_cli_submit_cmd(req)))
		if (error != TW_OSL_EBUSY) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Failed to start SCSI command. "
				"request = %p, error = %d", req, error);
			return(TW_OSL_EIO);
		}
	return(TW_OSL_ESUCCESS);
}
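
/*
 * Illustrative note (kept under #if 0, never compiled): the only CDB bytes
 * tw_cli_send_scsi_cmd() fills in are the opcode and cdb[4].  For the one
 * opcode this file sends (REQUEST SENSE, from tw_cli_get_aen() below), the
 * resulting 6-byte CDB looks like this:
 */
#if 0
/*
 *	cdb[0] = 0x03;	-- REQUEST SENSE opcode
 *	cdb[1..3]	-- reserved, left zero
 *	cdb[4] = 128;	-- allocation length: room for 128 bytes of sense data
 *	cdb[5]		-- control byte, left zero
 */
#endif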


/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command. "
			"request = %p, error = %d", req, error);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}


/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			sgl_src -- ptr to fill the sg list from
 *			sgl_dest -- ptr to sg list
 *			num_sgl_entries -- # of SG list entries
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
				/*
				 * Source descriptors carry an 8-byte length;
				 * step past the high 4 bytes.
				 */
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}
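
/*
 * Illustrative note (kept under #if 0, never compiled): the source
 * descriptor shapes tw_cli_fill_sg_list() above copes with.  The strides
 * follow from the code itself: the destination is always a tw_cl_sg_desc32/
 * tw_cl_sg_desc64 array, while TW_CL_64BIT_SG_LENGTH widens the *source*
 * length field to 8 bytes, hence the extra 4-byte skip per entry.
 */
#if 0
/*
 *	32-bit entries:			4-byte addr + 4-byte len  (stride  8)
 *	64-bit entries:			8-byte addr + 4-byte len  (stride 12)
 *	64-bit + TW_CL_64BIT_SG_LENGTH:	8-byte addr + 8-byte len  (stride 16:
 *					sgl_s++ moves 12, the cast adds 4 more)
 */
#endif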