/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 */


/*
 * Common Layer interrupt handling functions.
 */
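
/*
 * Interrupt handling in the Common Layer is split into two stages:
 * tw_cl_interrupt() reads the status register, clears or masks the interrupt
 * sources it recognizes, records which sources are pending in the controller
 * context, and reports through its return value whether a deferred call is
 * needed; tw_cl_deferred_interrupt() then does the bulk of the work,
 * dispatching each pending source (host, attention, command, response) to
 * its handler.
 */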


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"



/*
 * Function name:	tw_cl_interrupt
 * Description:		Interrupt handler.  Determines the kind of interrupt,
 *			and returns TW_CL_TRUE if it recognizes the interrupt.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	TW_CL_TRUE  -- interrupt recognized
 *			TW_CL_FALSE -- interrupt not recognized
 */
TW_INT32
tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_UINT32			status_reg;
	TW_INT32			rc = TW_CL_FALSE;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* If we don't have controller context, bail. */
	if (ctlr == NULL)
		goto out;

	/*
	 * If we get an interrupt while resetting, it is a shared one for
	 * another device, so just bail.
	 */
	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS)
		goto out;

	/*
	 * Synchronize access between writes to command and control registers
	 * in 64-bit environments, on G66.
	 */
	if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
		tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* Read the status register to determine the type of interrupt. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (tw_cli_check_ctlr_state(ctlr, status_reg))
		goto out_unlock;

	/* Clear the interrupt. */
	if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Host interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_HOST_INTERRUPT);
		ctlr->host_intr_pending = 0; /* we don't use this */
		rc |= TW_CL_FALSE; /* don't request a deferred isr call */
	}
	if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Attention interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
		ctlr->attn_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request a deferred isr call */
	}
	if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Command interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		ctlr->cmd_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request a deferred isr call */
	}
	if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Response interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_MASK_RESPONSE_INTERRUPT);
		ctlr->resp_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request a deferred isr call */
	}
out_unlock:
	if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
out:
	return(rc);
}
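

/*
 * The OS Layer is expected to call tw_cl_interrupt() from its interrupt
 * routine and, when that returns TW_CL_TRUE, to follow up with a call to
 * tw_cl_deferred_interrupt().  A minimal sketch of such wiring (the osl_*
 * names below are illustrative only and not part of the CL/OSL interface):
 *
 *	static void
 *	osl_pci_intr(void *arg)
 *	{
 *		struct osl_softc	*sc = arg;
 *
 *		if (tw_cl_interrupt(&sc->ctlr_handle))
 *			tw_cl_deferred_interrupt(&sc->ctlr_handle);
 *	}
 */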



/*
 * Function name:	tw_cl_deferred_interrupt
 * Description:		Deferred interrupt handler.  Does most of the
 *			processing related to an interrupt.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Dispatch based on the kind of interrupt. */
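	/*
	 * Each pending flag is cleared before its handler is called, so a
	 * source that fires again while it is being serviced is re-recorded
	 * by tw_cl_interrupt and picked up on a subsequent deferred call
	 * rather than being lost.
	 */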
	if (ctlr->host_intr_pending) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Processing Host interrupt");
		ctlr->host_intr_pending = 0;
		tw_cli_process_host_intr(ctlr);
	}
	if (ctlr->attn_intr_pending) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Processing Attention interrupt");
		ctlr->attn_intr_pending = 0;
		tw_cli_process_attn_intr(ctlr);
	}
	if (ctlr->cmd_intr_pending) {
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Processing Command interrupt");
		ctlr->cmd_intr_pending = 0;
		tw_cli_process_cmd_intr(ctlr);
	}
	if (ctlr->resp_intr_pending) {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Processing Response interrupt");
		ctlr->resp_intr_pending = 0;
		tw_cli_process_resp_intr(ctlr);
	}
}



/*
 * Function name:	tw_cli_process_host_intr
 * Description:		This function gets called if we triggered an interrupt.
 *			We don't use it as of now.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_process_host_intr(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
}



/*
 * Function name:	tw_cli_process_attn_intr
 * Description:		This function gets called if the fw posted an AEN
 *			(Asynchronous Event Notification).  It fetches
 *			all the AEN's that the fw might have posted.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_process_attn_intr(struct tw_cli_ctlr_context *ctlr)
{
	TW_INT32	error;

	tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((error = tw_cli_get_aen(ctlr))) {
		/*
		 * If the driver is already in the process of retrieving AEN's,
		 * we will be returned TW_OSL_EBUSY.  In this case,
		 * tw_cli_param_callback or tw_cli_aen_callback will eventually
		 * retrieve the AEN this attention interrupt is for.  So, we
		 * don't need to print the failure.
		 */
		if (error != TW_OSL_EBUSY)
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1200, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Failed to fetch AEN",
				"error = %d", error);
	}
}



/*
 * Function name:	tw_cli_process_cmd_intr
 * Description:		This function gets called if we hit a queue full
 *			condition earlier, and the fw is now ready for
 *			new cmds.  Submits any pending requests.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_process_cmd_intr(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Let the OS Layer submit any requests in its pending queue,
	 * if it has one.
	 */
	tw_osl_ctlr_ready(ctlr->ctlr_handle);

	/* Start any requests that might be in the pending queue. */
	tw_cli_submit_pending_queue(ctlr);

	/*
	 * If tw_cli_submit_pending_queue was unsuccessful due to a "cmd queue
	 * full" condition, cmd_intr will already have been unmasked by
	 * tw_cli_submit_cmd.  We don't need to do it again... simply return.
	 */
}



/*
 * Function name:	tw_cli_process_resp_intr
 * Description:		Looks for cmd completions from fw; queues cmds
 *			completed by fw into the complete queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- no ctlr error
 *			non-zero-- ctlr error
 */
TW_INT32
tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT32			resp;
	struct tw_cli_req_context	*req;
	TW_INT32			error;
	TW_UINT32			status_reg;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller response queue. */
	tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock);

	for (;;) {
		status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
			break;
		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) {
			tw_cli_dbg_printf(7, ctlr->ctlr_handle,
				tw_osl_cur_func(), "Response queue empty");
			break;
		}

		/* Response queue is not empty. */
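		/*
		 * Each entry read from the response queue register carries
		 * the request id of a command the firmware has completed;
		 * GET_RESP_ID extracts it, and the id indexes directly into
		 * the controller's request context array (req_ctxt_buf).
		 */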
		resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
		req = &(ctlr->req_ctxt_buf[GET_RESP_ID(resp)]);

		if (req->state != TW_CLI_REQ_STATE_BUSY) {
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1201, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unposted command completed!!",
				"request = %p, status = %d",
				req, req->state);
#ifdef TW_OSL_DEBUG
			tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
			tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
			tw_cl_reset_ctlr(ctlr->ctlr_handle);
			return(TW_OSL_EIO);
		}

		/*
		 * Remove the request from the busy queue, mark it as complete,
		 * and enqueue it in the complete queue.
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_BUSY_Q);
		req->state = TW_CLI_REQ_STATE_COMPLETE;
		tw_cli_req_q_insert_tail(req, TW_CLI_COMPLETE_Q);
	}

	/* Unmask the response interrupt. */
	TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
		TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT);

	tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);

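	/*
	 * Note that intr_lock has been dropped before the completion
	 * callbacks are run, presumably so that they are free to re-enter
	 * the Common Layer (for example, to submit new requests) without
	 * risking a deadlock on that lock.
	 */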
	/* Complete this, and other requests in the complete queue. */
	tw_cli_process_complete_queue(ctlr);

	return(error);
}



/*
 * Function name:	tw_cli_submit_pending_queue
 * Description:		Kick starts any requests in the pending queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- all pending requests submitted successfully
 *			non-zero-- otherwise
 */
TW_INT32
tw_cli_submit_pending_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Pull requests off the pending queue, and submit them.
	 */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
		TW_CL_NULL) {
		if ((error = tw_cli_submit_cmd(req))) {
			if (error == TW_OSL_EBUSY) {
				tw_cli_dbg_printf(2, ctlr->ctlr_handle,
					tw_osl_cur_func(),
					"Requeueing pending request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				/*
				 * Queue the request at the head of the pending
				 * queue, and break away, so we don't try to
				 * submit any more requests.
				 */
				tw_cli_req_q_insert_head(req, TW_CLI_PENDING_Q);
				break;
			} else {
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1202, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Could not start request "
					"in pending queue",
					"request = %p, opcode = 0x%x, "
					"error = %d", req,
					GET_OPCODE(req->cmd_pkt->
						command.cmd_pkt_9k.res__opcode),
					error);
				/*
				 * Set the appropriate error and call the CL
				 * internal callback if there's one.  If the
				 * request originator is polling for
				 * completion, it should be checking
				 * req->error_code to determine that the
				 * request did not go through.  The request
				 * originators are responsible for the
				 * clean-up.
				 */
				req->error_code = error;
				req->state = TW_CLI_REQ_STATE_COMPLETE;
				if (req->tw_cli_callback)
					req->tw_cli_callback(req);
				error = TW_OSL_ESUCCESS;
			}
		}
	}
	return(error);
}



/*
 * Function name:	tw_cli_process_complete_queue
 * Description:		Calls the CL internal callback routine, if any, for
 *			each request in the complete queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_process_complete_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Pull commands off the completed list, dispatch them appropriately.
	 */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_COMPLETE_Q)) !=
		TW_CL_NULL) {
		/* Call the CL internal callback, if there's one. */
		if (req->tw_cli_callback)
			req->tw_cli_callback(req);
	}
}



/*
 * Function name:	tw_cli_complete_io
 * Description:		CL internal callback for SCSI/fw passthru requests.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_complete_io(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(req->orig_req);

	tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	req_pkt->status = TW_CL_ERR_REQ_SUCCESS;
	if (req->error_code) {
		req_pkt->status = TW_CL_ERR_REQ_UNABLE_TO_SUBMIT_COMMAND;
		goto out;
	}

	if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1203, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"I/O completion on incomplete command!!",
			"request = %p, status = %d",
			req, req->state);
#ifdef TW_OSL_DEBUG
		tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
		req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
		goto out;
	}

	if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
		/* Copy the command packet back into OSL's space. */
		tw_osl_memcpy(req_pkt->gen_req_pkt.pt_req.cmd_pkt, req->cmd_pkt,
			sizeof(struct tw_cl_command_packet));
	} else
		tw_cli_scsi_complete(req);

out:
	req_pkt->tw_osl_callback(req->req_handle);
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}



/*
 * Function name:	tw_cli_scsi_complete
 * Description:		Completion routine for SCSI requests.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_scsi_complete(struct tw_cli_req_context *req)
{
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(req->orig_req);
	struct tw_cl_scsi_req_packet	*scsi_req =
		&(req_pkt->gen_req_pkt.scsi_req);
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			error;
	TW_UINT8			*cdb;

	tw_cli_dbg_printf(8, req->ctlr->ctlr_handle, tw_osl_cur_func(),
		"entered");

	scsi_req->scsi_status = cmd->status;
	if (! cmd->status)
		return;

	tw_cli_dbg_printf(1, req->ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), cmd->status);

	cmd_hdr = &(req->cmd_pkt->cmd_hdr);
	error = cmd_hdr->status_block.error;
	if ((error == TWA_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) ||
		(error == TWA_ERROR_UNIT_OFFLINE)) {
		if (GET_LUN_L4(cmd->lun_l4__req_id))
			req_pkt->status |= TW_CL_ERR_REQ_INVALID_LUN;
		else
			req_pkt->status |= TW_CL_ERR_REQ_INVALID_TARGET;
	} else {
		tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
			tw_osl_cur_func(),
			"cmd = %x %x %x %x %x %x %x",
			GET_OPCODE(cmd->res__opcode),
			GET_SGL_OFF(cmd->res__opcode),
			cmd->unit,
			cmd->lun_l4__req_id,
			cmd->status,
			cmd->sgl_offset,
			cmd->lun_h4__sgl_entries);

		cdb = (TW_UINT8 *)(cmd->cdb);
		tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
			tw_osl_cur_func(),
			"cdb = %x %x %x %x %x %x %x %x "
			"%x %x %x %x %x %x %x %x",
			cdb[0], cdb[1], cdb[2], cdb[3],
			cdb[4], cdb[5], cdb[6], cdb[7],
			cdb[8], cdb[9], cdb[10], cdb[11],
			cdb[12], cdb[13], cdb[14], cdb[15]);

		/*
		 * Print the error.  Firmware doesn't yet support
		 * the 'Mode Sense' cmd.  Don't print if the cmd
		 * is 'Mode Sense', and the error is 'Invalid field
		 * in CDB'.
		 */
		if (! ((cdb[0] == 0x1A) && (error == 0x10D)))
			tw_cli_create_ctlr_event(req->ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
	}

	if (scsi_req->sense_data) {
		tw_osl_memcpy(scsi_req->sense_data, cmd_hdr->sense_data,
			TWA_SENSE_DATA_LENGTH);
		scsi_req->sense_len = TWA_SENSE_DATA_LENGTH;
		req_pkt->status |= TW_CL_ERR_REQ_AUTO_SENSE_VALID;
	}
	req_pkt->status |= TW_CL_ERR_REQ_SCSI_ERROR;
}



/*
 * Function name:	tw_cli_param_callback
 * Description:		Callback for get/set_param requests.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_param_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	union tw_cl_command_7k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_7k);
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets req->error_code is responsible for calling
	 * tw_cl_create_event.
	 */
	if (! req->error_code)
		if (cmd->param.status) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1204, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"get/set_param failed",
				"status = %d", cmd->param.status);
		}

	ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

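	/*
	 * TW_CLI_CTLR_STATE_GET_MORE_AENS is set by tw_cli_manage_aen when a
	 * sync-time AEN caused it to issue a set_param with this routine as
	 * the callback; honoring the flag here resumes draining the
	 * firmware's AEN queue once the time sync has completed.
	 */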
	if ((ctlr->state & TW_CLI_CTLR_STATE_GET_MORE_AENS) &&
		(!(ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS))) {
		ctlr->state &= ~TW_CLI_CTLR_STATE_GET_MORE_AENS;
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Fetching more AEN's");
		if ((error = tw_cli_get_aen(ctlr)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1205, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Failed to fetch all AEN's from param_callback",
				"error = %d", error);
	}
}



/*
 * Function name:	tw_cli_aen_callback
 * Description:		Callback for requests to fetch AEN's.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_aen_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_command_header	*cmd_hdr;
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	TW_UINT16			aen_code = TWA_AEN_QUEUE_EMPTY;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, req error = %d, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), req->error_code, cmd->status);

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets req->error_code is responsible for calling
	 * tw_cl_create_event.
	 */
	if (!(error = req->error_code))
		if ((error = cmd->status)) {
			cmd_hdr = (struct tw_cl_command_header *)
				(&(req->cmd_pkt->cmd_hdr));
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1206, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Request Sense failed",
				"opcode = 0x%x, status = %d",
				GET_OPCODE(cmd->res__opcode), cmd->status);
		}

	if (error) {
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		return;
	}

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"Request Sense command succeeded");

	aen_code = tw_cli_manage_aen(ctlr, req);

	if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		if (aen_code != TWA_AEN_QUEUE_EMPTY)
			if ((error = tw_cli_get_aen(ctlr)))
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1207, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Failed to fetch all AEN's",
					"error = %d", error);
	}
}



/*
 * Function name:	tw_cli_manage_aen
 * Description:		Handles AEN's.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 *			req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	The code of the AEN just processed
 *			(TWA_AEN_QUEUE_EMPTY if the fw's AEN queue was empty)
 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
	struct tw_cli_req_context *req)
{
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			aen_code;
	TW_TIME				local_time;
	TW_TIME				sync_time;
	TW_UINT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	cmd_hdr = (struct tw_cl_command_header *)(req->data);
	aen_code = cmd_hdr->status_block.error;

	switch (aen_code) {
	case TWA_AEN_SYNC_TIME_WITH_HOST:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Received AEN_SYNC_TIME");
		/*
		 * Free the internal req pkt right here, since
		 * tw_cli_set_param will need it.
		 */
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

		/*
		 * We will use a callback in tw_cli_set_param only when
		 * interrupts are enabled and we can expect our callback
		 * to get called.  Setting the TW_CLI_CTLR_STATE_GET_MORE_AENS
		 * flag will make the callback continue to try to retrieve
		 * more AEN's.
		 */
		if (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
			ctlr->state |= TW_CLI_CTLR_STATE_GET_MORE_AENS;
		/* Calculate time (in seconds) since last Sunday 12:00 AM. */
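		/*
		 * tw_osl_get_local_time() is assumed to return seconds since
		 * the Unix epoch, adjusted to the local time zone.  The epoch
		 * (Jan 1, 1970) fell on a Thursday, so subtracting 3 days'
		 * worth of seconds (3 * 86400) re-bases the count at a Sunday
		 * midnight; the remainder modulo one week (604800 = 7 * 86400
		 * seconds) is then the time elapsed since the most recent
		 * Sunday 12:00 AM, which is the value passed to the firmware
		 * as its scheduler time.
		 */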
		local_time = tw_osl_get_local_time();
		sync_time = (local_time - (3 * 86400)) % 604800;
		if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
				TWA_PARAM_TIME_SCHED_TIME, 4,
				&sync_time,
				(ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
				? tw_cli_param_callback : TW_CL_NULL)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unable to sync time with ctlr",
				"error = %d", error);

		break;


	case TWA_AEN_QUEUE_EMPTY:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"AEN queue empty");
		break;


	default:
		/* Queue the event. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Queueing AEN");
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
			cmd_hdr);
		break;
	} /* switch */
	return(aen_code);
}



/*
 * Function name:	tw_cli_enable_interrupts
 * Description:		Enables interrupts on the controller.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_enable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->state |= TW_CLI_CTLR_STATE_INTR_ENABLED;
	TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
		TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
		TWA_CONTROL_ENABLE_INTERRUPTS);
}



/*
 * Function name:	tw_cli_disable_interrupts
 * Description:		Disables interrupts on the controller.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_disable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
		TWA_CONTROL_DISABLE_INTERRUPTS);
	ctlr->state &= ~TW_CLI_CTLR_STATE_INTR_ENABLED;
}