/*
 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/6.0/sys/dev/twa/tw_cl_intr.c 144966 2005-04-12 22:07:11Z vkashyap $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 */


/*
 * Common Layer interrupt handling functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"


/*
 * Function name:   tw_cl_interrupt
 * Description:     Interrupt handler.  Determines the kind of interrupt,
 *                  and returns TW_CL_TRUE if it recognizes the interrupt.
 *
 * Input:           ctlr_handle -- controller handle
 * Output:          None
 * Return value:    TW_CL_TRUE  -- interrupt recognized
 *                  TW_CL_FALSE -- interrupt not recognized
 */
TW_INT32
tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
        struct tw_cli_ctlr_context      *ctlr =
                (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
        TW_UINT32                       status_reg;
        TW_INT32                        rc = TW_CL_FALSE;

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * Serialize access to this function so multiple threads don't try to
         * do the same thing (such as clearing interrupt bits).
         */
        tw_osl_get_lock(ctlr_handle, ctlr->intr_lock);

        /* Read the status register to determine the type of interrupt. */
        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
        if (tw_cli_check_ctlr_state(ctlr, status_reg))
                goto out;

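        /*
         * Each asserted interrupt source is acknowledged below (cleared or
         * masked at the controller), and a corresponding *_intr_pending flag
         * is latched.  rc accumulates TW_CL_TRUE whenever a source needs
         * further processing, telling the caller to schedule a call to
         * tw_cl_deferred_interrupt.
         */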
        /* Clear the interrupt. */
        if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Host interrupt");
                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                        TWA_CONTROL_CLEAR_HOST_INTERRUPT);
                ctlr->host_intr_pending = 0; /* we don't use this */
                rc |= TW_CL_FALSE; /* don't request a deferred isr call */
        }
        if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Attention interrupt");
                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                        TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
                ctlr->attn_intr_pending = 1;
                rc |= TW_CL_TRUE; /* request a deferred isr call */
        }
        if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Command interrupt");
                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                        TWA_CONTROL_MASK_COMMAND_INTERRUPT);
                ctlr->cmd_intr_pending = 1;
                rc |= TW_CL_TRUE; /* request a deferred isr call */
        }
        if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
                tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
                        "Response interrupt");
                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                        TWA_CONTROL_MASK_RESPONSE_INTERRUPT);
                ctlr->resp_intr_pending = 1;
                rc |= TW_CL_TRUE; /* request a deferred isr call */
        }
out:
        tw_osl_free_lock(ctlr_handle, ctlr->intr_lock);

        return(rc);
}



/*
 * Function name:   tw_cl_deferred_interrupt
 * Description:     Deferred interrupt handler.  Does most of the processing
 *                  related to an interrupt.
 *
 * Input:           ctlr_handle -- controller handle
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
        struct tw_cli_ctlr_context      *ctlr =
                (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        /* Dispatch based on the kind of interrupt. */
        if (ctlr->host_intr_pending) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Processing Host interrupt");
                ctlr->host_intr_pending = 0;
                tw_cli_process_host_intr(ctlr);
        }
        if (ctlr->attn_intr_pending) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Processing Attention interrupt");
                ctlr->attn_intr_pending = 0;
                tw_cli_process_attn_intr(ctlr);
        }
        if (ctlr->cmd_intr_pending) {
                tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
                        "Processing Command interrupt");
                ctlr->cmd_intr_pending = 0;
                tw_cli_process_cmd_intr(ctlr);
        }
        if (ctlr->resp_intr_pending) {
                tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
                        "Processing Response interrupt");
                ctlr->resp_intr_pending = 0;
                tw_cli_process_resp_intr(ctlr);
        }
}



/*
 * Function name:   tw_cli_process_host_intr
 * Description:     This function gets called if we triggered an interrupt.
 *                  We don't use it as of now.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_process_host_intr(struct tw_cli_ctlr_context *ctlr)
{
        tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
}



/*
 * Function name:   tw_cli_process_attn_intr
 * Description:     This function gets called if the fw posted an AEN
 *                  (Asynchronous Event Notification).  It fetches
 *                  all the AEN's that the fw might have posted.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_process_attn_intr(struct tw_cli_ctlr_context *ctlr)
{
        TW_INT32        error;

        tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        if ((error = tw_cli_get_aen(ctlr))) {
                /*
                 * If the driver is already in the process of retrieving AEN's,
                 * we will be returned TW_OSL_EBUSY.  In this case,
                 * tw_cli_param_callback or tw_cli_aen_callback will eventually
                 * retrieve the AEN this attention interrupt is for.  So, we
                 * don't need to print the failure.
                 */
                if (error != TW_OSL_EBUSY)
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1200, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Failed to fetch AEN",
                                "error = %d", error);
        }
}



/*
 * Function name:   tw_cli_process_cmd_intr
 * Description:     This function gets called if we hit a queue full
 *                  condition earlier, and the fw is now ready for
 *                  new cmds.  Submits any pending requests.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_process_cmd_intr(struct tw_cli_ctlr_context *ctlr)
{
        tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * Let the OS Layer submit any requests in its pending queue,
         * if it has one.
         */
        tw_osl_ctlr_ready(ctlr->ctlr_handle);

        /* Start any requests that might be in the pending queue. */
        tw_cli_submit_pending_queue(ctlr);

        /*
         * If tw_cli_submit_pending_queue was unsuccessful due to a "cmd queue
         * full" condition, cmd_intr will already have been unmasked by
         * tw_cli_submit_cmd.  We don't need to do it again... simply return.
         */
}



/*
 * Function name:   tw_cli_process_resp_intr
 * Description:     Looks for cmd completions from fw; queues cmds completed
 *                  by fw into complete queue.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    0        -- no ctlr error
 *                  non-zero -- ctlr error
 */
TW_INT32
tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
{
        TW_UINT32                       resp;
        struct tw_cli_req_context       *req;
        TW_INT32                        error;
        TW_UINT32                       status_reg;

        tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Serialize access to the controller response queue. */
        tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock);

        for (;;) {
                status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
                if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
                        break;
                if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) {
                        tw_cli_dbg_printf(7, ctlr->ctlr_handle,
                                tw_osl_cur_func(), "Response queue empty");
                        break;
                }

                /* Response queue is not empty. */
                resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
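                /*
                 * The response carries the request ID that was posted along
                 * with the command; use it to look up the corresponding CL
                 * internal request context.
                 */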
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
                if (GET_RESP_ID(resp) >= 1)
                        req = ctlr->busy_reqs[GET_RESP_ID(resp)];
                else
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
                {
                        req = &(ctlr->req_ctxt_buf[GET_RESP_ID(resp)]);
                }

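                /*
                 * A completion for a request that was never posted (or has
                 * already been completed) means the firmware and the driver
                 * have lost sync; log the event and reset the controller.
                 */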
                if (req->state != TW_CLI_REQ_STATE_BUSY) {
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1201, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Unposted command completed!!",
                                "request = %p, status = %d",
                                req, req->state);
#ifdef TW_OSL_DEBUG
                        tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
                        tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
                        tw_cl_reset_ctlr(ctlr->ctlr_handle);
                        return(TW_OSL_EIO);
                }

                /*
                 * Remove the request from the busy queue, mark it as complete,
                 * and enqueue it in the complete queue.
                 */
                tw_cli_req_q_remove_item(req, TW_CLI_BUSY_Q);
                req->state = TW_CLI_REQ_STATE_COMPLETE;
                tw_cli_req_q_insert_tail(req, TW_CLI_COMPLETE_Q);
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
                /* Call the CL internal callback, if there's one. */
                if (req->tw_cli_callback)
                        req->tw_cli_callback(req);
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
        }

        /* Unmask the response interrupt (it was masked in tw_cl_interrupt). */
        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT);

        tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);

#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
        /* Complete this, and other requests in the complete queue. */
        tw_cli_process_complete_queue(ctlr);
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

        return(error);
}



/*
 * Function name:   tw_cli_submit_pending_queue
 * Description:     Kick starts any requests in the pending queue.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    0        -- all pending requests submitted successfully
 *                  non-zero -- otherwise
 */
TW_INT32
tw_cli_submit_pending_queue(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cli_req_context       *req;
        TW_INT32                        error = TW_OSL_ESUCCESS;

        tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * Pull requests off the pending queue, and submit them.
         */
        while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
                TW_CL_NULL) {
                if ((error = tw_cli_submit_cmd(req))) {
                        if (error == TW_OSL_EBUSY) {
                                tw_cli_dbg_printf(2, ctlr->ctlr_handle,
                                        tw_osl_cur_func(),
                                        "Requeueing pending request");
                                req->state = TW_CLI_REQ_STATE_PENDING;
                                /*
                                 * Queue the request at the head of the pending
                                 * queue, and break away, so we don't try to
                                 * submit any more requests.
                                 */
                                tw_cli_req_q_insert_head(req, TW_CLI_PENDING_Q);
                                break;
                        } else {
                                tw_cl_create_event(ctlr->ctlr_handle,
                                        TW_CL_FALSE,
                                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                        0x1202, 0x1,
                                        TW_CL_SEVERITY_ERROR_STRING,
                                        "Could not start request "
                                        "in pending queue",
                                        "request = %p, opcode = 0x%x, "
                                        "error = %d", req,
                                        GET_OPCODE(req->cmd_pkt->
                                                command.cmd_pkt_9k.res__opcode),
                                        error);
                                /*
                                 * Set the appropriate error and call the CL
                                 * internal callback if there's one.  If the
                                 * request originator is polling for
                                 * completion, it should check req->error_code
                                 * to determine that the request did not go
                                 * through.  The request originators are
                                 * responsible for the clean-up.
                                 */
                                req->error_code = error;
                                req->state = TW_CLI_REQ_STATE_COMPLETE;
                                if (req->tw_cli_callback)
                                        req->tw_cli_callback(req);
                                error = TW_OSL_ESUCCESS;
                        }
                }
        }
        return(error);
}



/*
 * Function name:   tw_cli_process_complete_queue
 * Description:     Calls the CL internal callback routine, if any, for
 *                  each request in the complete queue.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_process_complete_queue(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cli_req_context       *req;

        tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * Pull commands off the completed list, dispatch them appropriately.
         */
        while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_COMPLETE_Q)) !=
                TW_CL_NULL) {
                /* Call the CL internal callback, if there's one. */
                if (req->tw_cli_callback)
                        req->tw_cli_callback(req);
        }
}



/*
 * Function name:   tw_cli_complete_io
 * Description:     CL internal callback for SCSI/fw passthru requests.
 *
 * Input:           req -- ptr to CL internal request context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_complete_io(struct tw_cli_req_context *req)
{
        struct tw_cli_ctlr_context      *ctlr = req->ctlr;
        struct tw_cl_req_packet         *req_pkt =
                (struct tw_cl_req_packet *)(req->orig_req);

        tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        req_pkt->status = TW_CL_ERR_REQ_SUCCESS;
        if (req->error_code) {
                req_pkt->status = TW_CL_ERR_REQ_UNABLE_TO_SUBMIT_COMMAND;
                goto out;
        }

        if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
                tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1203, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "I/O completion on incomplete command!!",
                        "request = %p, status = %d",
                        req, req->state);
#ifdef TW_OSL_DEBUG
                tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
                tw_cl_reset_ctlr(ctlr->ctlr_handle);
                req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
                goto out;
        }

        if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
                /* Copy the command packet back into OSL's space. */
                tw_osl_memcpy(req_pkt->gen_req_pkt.pt_req.cmd_pkt, req->cmd_pkt,
                        sizeof(struct tw_cl_command_packet));
        } else
                tw_cli_scsi_complete(req);

out:
        req_pkt->tw_osl_callback(req->req_handle);
        tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}



/*
 * Function name:   tw_cli_scsi_complete
 * Description:     Completion routine for SCSI requests.
 *
 * Input:           req -- ptr to CL internal request context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_scsi_complete(struct tw_cli_req_context *req)
{
        struct tw_cl_req_packet         *req_pkt =
                (struct tw_cl_req_packet *)(req->orig_req);
        struct tw_cl_scsi_req_packet    *scsi_req =
                &(req_pkt->gen_req_pkt.scsi_req);
        struct tw_cl_command_9k         *cmd =
                &(req->cmd_pkt->command.cmd_pkt_9k);
        struct tw_cl_command_header     *cmd_hdr;
        TW_UINT16                       error;
        TW_UINT8                        *cdb;

        tw_cli_dbg_printf(8, req->ctlr->ctlr_handle, tw_osl_cur_func(),
                "entered");

        scsi_req->scsi_status = cmd->status;
        if (! cmd->status)
                return;

        tw_cli_dbg_printf(1, req->ctlr->ctlr_handle, tw_osl_cur_func(),
                "req_id = 0x%x, status = 0x%x",
                GET_REQ_ID(cmd->lun_l4__req_id), cmd->status);

        cmd_hdr = &(req->cmd_pkt->cmd_hdr);
        error = cmd_hdr->status_block.error;
        if ((error == TWA_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) ||
                (error == TWA_ERROR_UNIT_OFFLINE)) {
                if (GET_LUN_L4(cmd->lun_l4__req_id))
                        req_pkt->status |= TW_CL_ERR_REQ_INVALID_LUN;
                else
                        req_pkt->status |= TW_CL_ERR_REQ_INVALID_TARGET;
        } else {
                tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
                        tw_osl_cur_func(),
                        "cmd = %x %x %x %x %x %x %x",
                        GET_OPCODE(cmd->res__opcode),
                        GET_SGL_OFF(cmd->res__opcode),
                        cmd->unit,
                        cmd->lun_l4__req_id,
                        cmd->status,
                        cmd->sgl_offset,
                        cmd->lun_h4__sgl_entries);

                cdb = (TW_UINT8 *)(cmd->cdb);
                tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
                        tw_osl_cur_func(),
                        "cdb = %x %x %x %x %x %x %x %x "
                        "%x %x %x %x %x %x %x %x",
                        cdb[0], cdb[1], cdb[2], cdb[3],
                        cdb[4], cdb[5], cdb[6], cdb[7],
                        cdb[8], cdb[9], cdb[10], cdb[11],
                        cdb[12], cdb[13], cdb[14], cdb[15]);

                /*
                 * Print the error.  Firmware doesn't yet support
                 * the 'Mode Sense' cmd.  Don't print if the cmd
                 * is 'Mode Sense', and the error is 'Invalid field
                 * in CDB'.
                 */
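                /*
                 * (CDB opcode 0x1A is the SCSI MODE SENSE(6) command; 0x10D
                 * is the firmware's 'Invalid field in CDB' error code.)
                 */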
                if (! ((cdb[0] == 0x1A) && (error == 0x10D)))
                        tw_cli_create_ctlr_event(req->ctlr,
                                TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                                cmd_hdr);
        }

        if (scsi_req->sense_data) {
                tw_osl_memcpy(scsi_req->sense_data, cmd_hdr->sense_data,
                        TWA_SENSE_DATA_LENGTH);
                scsi_req->sense_len = TWA_SENSE_DATA_LENGTH;
                req_pkt->status |= TW_CL_ERR_REQ_AUTO_SENSE_VALID;
        }
        req_pkt->status |= TW_CL_ERR_REQ_SCSI_ERROR;
}



/*
 * Function name:   tw_cli_param_callback
 * Description:     Callback for get/set_param requests.
 *
 * Input:           req -- ptr to completed request pkt
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_param_callback(struct tw_cli_req_context *req)
{
        struct tw_cli_ctlr_context      *ctlr = req->ctlr;
        union tw_cl_command_7k          *cmd =
                &(req->cmd_pkt->command.cmd_pkt_7k);
        TW_INT32                        error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * If the request was never submitted to the controller, the function
         * that sets req->error_code is responsible for calling
         * tw_cl_create_event.
         */
        if (! req->error_code)
                if (cmd->param.status) {
                        tw_cli_create_ctlr_event(ctlr,
                                TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                                &(req->cmd_pkt->cmd_hdr));
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1204, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "get/set_param failed",
                                "status = %d", cmd->param.status);
                }

        ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
        tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

        if ((ctlr->state & TW_CLI_CTLR_STATE_GET_MORE_AENS) &&
                (!(ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS))) {
                ctlr->state &= ~TW_CLI_CTLR_STATE_GET_MORE_AENS;
                tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                        "Fetching more AEN's");
                if ((error = tw_cli_get_aen(ctlr)))
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1205, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Failed to fetch all AEN's from param_callback",
                                "error = %d", error);
        }
}



/*
 * Function name:   tw_cli_aen_callback
 * Description:     Callback for requests to fetch AEN's.
 *
 * Input:           req -- ptr to completed request pkt
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_aen_callback(struct tw_cli_req_context *req)
{
        struct tw_cli_ctlr_context      *ctlr = req->ctlr;
        struct tw_cl_command_header     *cmd_hdr;
        struct tw_cl_command_9k         *cmd =
                &(req->cmd_pkt->command.cmd_pkt_9k);
        TW_UINT16                       aen_code = TWA_AEN_QUEUE_EMPTY;
        TW_INT32                        error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                "req_id = 0x%x, req error = %d, status = 0x%x",
                GET_REQ_ID(cmd->lun_l4__req_id), req->error_code, cmd->status);

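        /*
         * AEN's are fetched by sending the firmware a Request Sense command;
         * a non-zero cmd->status means that command failed.
         */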
        /*
         * If the request was never submitted to the controller, the function
         * that sets req->error_code is responsible for calling
         * tw_cl_create_event.
         */
        if (!(error = req->error_code))
                if ((error = cmd->status)) {
                        cmd_hdr = (struct tw_cl_command_header *)
                                (&(req->cmd_pkt->cmd_hdr));
                        tw_cli_create_ctlr_event(ctlr,
                                TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                                cmd_hdr);
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1206, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Request Sense failed",
                                "opcode = 0x%x, status = %d",
                                GET_OPCODE(cmd->res__opcode), cmd->status);
                }

        if (error) {
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
                return;
        }

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                "Request Sense command succeeded");

        aen_code = tw_cli_manage_aen(ctlr, req);

        if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
                if (aen_code != TWA_AEN_QUEUE_EMPTY)
                        if ((error = tw_cli_get_aen(ctlr)))
                                tw_cl_create_event(ctlr->ctlr_handle,
                                        TW_CL_FALSE,
                                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                        0x1207, 0x1,
                                        TW_CL_SEVERITY_ERROR_STRING,
                                        "Failed to fetch all AEN's",
                                        "error = %d", error);
        }
}



/*
 * Function name:   tw_cli_manage_aen
 * Description:     Handles AEN's.
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 *                  req  -- ptr to CL internal request context
 * Output:          None
 * Return value:    The AEN code, as read from the command header
 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
        struct tw_cli_req_context *req)
{
        struct tw_cl_command_header     *cmd_hdr;
        TW_UINT16                       aen_code;
        TW_TIME                         local_time;
        TW_TIME                         sync_time;
        TW_UINT32                       error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        cmd_hdr = (struct tw_cl_command_header *)(req->data);
        aen_code = cmd_hdr->status_block.error;

        switch (aen_code) {
        case TWA_AEN_SYNC_TIME_WITH_HOST:
                tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                        "Received AEN_SYNC_TIME");
                /*
                 * Free the internal req pkt right here, since
                 * tw_cli_set_param will need it.
                 */
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

                /*
                 * We will use a callback in tw_cli_set_param only when
                 * interrupts are enabled and we can expect our callback
                 * to get called.  Setting the TW_CLI_CTLR_STATE_GET_MORE_AENS
                 * flag will make the callback continue to try to retrieve
                 * more AEN's.
                 */
                if (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
                        ctlr->state |= TW_CLI_CTLR_STATE_GET_MORE_AENS;
                /* Calculate time (in seconds) since last Sunday 12.00 AM. */
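                /*
                 * The Unix epoch (Jan 1, 1970, 00:00) fell on a Thursday;
                 * subtracting 3 days (3 * 86400 seconds) re-bases the count
                 * at the first Sunday (Jan 4, 1970), so the remainder modulo
                 * one week (604800 seconds) is the time elapsed since the
                 * most recent Sunday midnight.
                 */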
                local_time = tw_osl_get_local_time();
                sync_time = (local_time - (3 * 86400)) % 604800;
                if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
                                TWA_PARAM_TIME_SCHED_TIME, 4,
                                &sync_time,
                                (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
                                ? tw_cli_param_callback : TW_CL_NULL)))
                        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Unable to sync time with ctlr",
                                "error = %d", error);

                break;


        case TWA_AEN_QUEUE_EMPTY:
                tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                        "AEN queue empty");
                break;


        default:
                /* Queue the event. */

                tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
                        "Queueing AEN");
                tw_cli_create_ctlr_event(ctlr,
                        TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
                        cmd_hdr);
                break;
        } /* switch */
        return(aen_code);
}



/*
 * Function name:   tw_cli_enable_interrupts
 * Description:     Enables interrupts on the controller
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_enable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
        tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        ctlr->state |= TW_CLI_CTLR_STATE_INTR_ENABLED;
        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
                TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
                TWA_CONTROL_ENABLE_INTERRUPTS);
}



/*
 * Function name:   tw_cli_disable_interrupts
 * Description:     Disables interrupts on the controller
 *
 * Input:           ctlr -- ptr to CL internal ctlr context
 * Output:          None
 * Return value:    None
 */
TW_VOID
tw_cli_disable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
        tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                TWA_CONTROL_DISABLE_INTERRUPTS);
        ctlr->state &= ~TW_CLI_CTLR_STATE_INTR_ENABLED;
}
