1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/6.2/sys/dev/twa/tw_cl_intr.c 153207 2005-12-07 18:18:06Z vkashyap $
28 */
29
30 /*
31 * AMCC'S 3ware driver for 9000 series storage controllers.
32 *
33 * Author: Vinod Kashyap
34 */
35
36
37 /*
38 * Common Layer interrupt handling functions.
39 */
40
41
42 #include "tw_osl_share.h"
43 #include "tw_cl_share.h"
44 #include "tw_cl_fwif.h"
45 #include "tw_cl_ioctl.h"
46 #include "tw_cl.h"
47 #include "tw_cl_externs.h"
48 #include "tw_osl_ioctl.h"
49
50
51
52 /*
53 * Function name: twa_interrupt
54 * Description: Interrupt handler. Determines the kind of interrupt,
55 * and returns TW_CL_TRUE if it recognizes the interrupt.
56 *
57 * Input: ctlr_handle -- controller handle
58 * Output: None
59 * Return value: TW_CL_TRUE -- interrupt recognized
60 * TW_CL_FALSE-- interrupt not recognized
61 */
TW_INT32
tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_UINT32			status_reg;
	TW_INT32			rc = TW_CL_FALSE;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Synchronize access between writes to command and control registers
	 * in 64-bit environments, on G66.
	 */
	if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
		tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* Read the status register to determine the type of interrupt. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	/* If the ctlr is in a bad state, claim nothing (rc stays FALSE). */
	if (tw_cli_check_ctlr_state(ctlr, status_reg))
		goto out;

	/* Clear the interrupt. */
	if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
		/* Host interrupts are ones we triggered ourselves; unused. */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Host interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_HOST_INTERRUPT);
		ctlr->host_intr_pending = 0; /* we don't use this */
		rc |= TW_CL_FALSE; /* don't request for a deferred isr call */
	}
	if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) {
		/* Firmware posted an AEN; fetched by the deferred handler. */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Attention interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
		ctlr->attn_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
	}
	if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
		/*
		 * Firmware is ready for more cmds.  Mask (not clear) this
		 * interrupt until the deferred handler has drained the
		 * pending queue.
		 */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Command interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		ctlr->cmd_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
	}
	if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
		/*
		 * Cmd completions are waiting in the response queue.  Mask
		 * this interrupt until tw_cli_process_resp_intr unmasks it.
		 */
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Response interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_MASK_RESPONSE_INTERRUPT);
		ctlr->resp_intr_pending = 1;
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
	}
out:
	if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	return(rc);
}
123
124
125
126 /*
127 * Function name: tw_cl_deferred_interrupt
128 * Description: Deferred interrupt handler. Does most of the processing
129 * related to an interrupt.
130 *
131 * Input: ctlr_handle -- controller handle
132 * Output: None
133 * Return value: None
134 */
135 TW_VOID
136 tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
137 {
138 struct tw_cli_ctlr_context *ctlr =
139 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
140
141 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
142
143 /* Dispatch based on the kind of interrupt. */
144 if (ctlr->host_intr_pending) {
145 tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
146 "Processing Host interrupt");
147 ctlr->host_intr_pending = 0;
148 tw_cli_process_host_intr(ctlr);
149 }
150 if (ctlr->attn_intr_pending) {
151 tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
152 "Processing Attention interrupt");
153 ctlr->attn_intr_pending = 0;
154 tw_cli_process_attn_intr(ctlr);
155 }
156 if (ctlr->cmd_intr_pending) {
157 tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
158 "Processing Command interrupt");
159 ctlr->cmd_intr_pending = 0;
160 tw_cli_process_cmd_intr(ctlr);
161 }
162 if (ctlr->resp_intr_pending) {
163 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
164 "Processing Response interrupt");
165 ctlr->resp_intr_pending = 0;
166 tw_cli_process_resp_intr(ctlr);
167 }
168 }
169
170
171
172 /*
173 * Function name: tw_cli_process_host_intr
174 * Description: This function gets called if we triggered an interrupt.
175 * We don't use it as of now.
176 *
177 * Input: ctlr -- ptr to CL internal ctlr context
178 * Output: None
179 * Return value: None
180 */
TW_VOID
tw_cli_process_host_intr(struct tw_cli_ctlr_context *ctlr)
{
	/* Host interrupts are currently unused; just trace the call. */
	tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
}
186
187
188
189 /*
190 * Function name: tw_cli_process_attn_intr
191 * Description: This function gets called if the fw posted an AEN
192 * (Asynchronous Event Notification). It fetches
193 * all the AEN's that the fw might have posted.
194 *
195 * Input: ctlr -- ptr to CL internal ctlr context
196 * Output: None
197 * Return value: None
198 */
199 TW_VOID
200 tw_cli_process_attn_intr(struct tw_cli_ctlr_context *ctlr)
201 {
202 TW_INT32 error;
203
204 tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
205
206 if ((error = tw_cli_get_aen(ctlr))) {
207 /*
208 * If the driver is already in the process of retrieveing AEN's,
209 * we will be returned TW_OSL_EBUSY. In this case,
210 * tw_cli_param_callback or tw_cli_aen_callback will eventually
211 * retrieve the AEN this attention interrupt is for. So, we
212 * don't need to print the failure.
213 */
214 if (error != TW_OSL_EBUSY)
215 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
216 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
217 0x1200, 0x1, TW_CL_SEVERITY_ERROR_STRING,
218 "Failed to fetch AEN",
219 "error = %d", error);
220 }
221 }
222
223
224
225 /*
226 * Function name: tw_cli_process_cmd_intr
227 * Description: This function gets called if we hit a queue full
228 * condition earlier, and the fw is now ready for
229 * new cmds. Submits any pending requests.
230 *
231 * Input: ctlr -- ptr to CL internal ctlr context
232 * Output: None
233 * Return value: None
234 */
235 TW_VOID
236 tw_cli_process_cmd_intr(struct tw_cli_ctlr_context *ctlr)
237 {
238 tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
239
240 /*
241 * Let the OS Layer submit any requests in its pending queue,
242 * if it has one.
243 */
244 tw_osl_ctlr_ready(ctlr->ctlr_handle);
245
246 /* Start any requests that might be in the pending queue. */
247 tw_cli_submit_pending_queue(ctlr);
248
249 /*
250 * If tw_cli_submit_pending_queue was unsuccessful due to a "cmd queue
251 * full" condition, cmd_intr will already have been unmasked by
252 * tw_cli_submit_cmd. We don't need to do it again... simply return.
253 */
254 }
255
256
257
258 /*
259 * Function name: tw_cli_process_resp_intr
260 * Description: Looks for cmd completions from fw; queues cmds completed
261 * by fw into complete queue.
262 *
263 * Input: ctlr -- ptr to CL internal ctlr context
264 * Output: None
265 * Return value: 0 -- no ctlr error
266 * non-zero-- ctlr error
267 */
TW_INT32
tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT32			resp;
	struct tw_cli_req_context	*req;
	TW_INT32			error;
	TW_UINT32			status_reg;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller response queue. */
	tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock);

	for (;;) {
		/* Re-check ctlr health before each read of the resp queue. */
		status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
			break;
		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) {
			tw_cli_dbg_printf(7, ctlr->ctlr_handle,
				tw_osl_cur_func(), "Response queue empty");
			break;
		}

		/* Response queue is not empty. */
		resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		/* Req id 0 lives in req_ctxt_buf; all others in busy_reqs. */
		if (GET_RESP_ID(resp) >= 1)
			req = ctlr->busy_reqs[GET_RESP_ID(resp)];
		else
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		{
			req = &(ctlr->req_ctxt_buf[GET_RESP_ID(resp)]);
		}

		if (req->state != TW_CLI_REQ_STATE_BUSY) {
			/*
			 * The fw completed a request we never posted (or
			 * completed one twice).  Reset the controller; the
			 * intr_lock must be dropped first since we bail out
			 * without reaching the normal unlock below.
			 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1201, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unposted command completed!!",
				"request = %p, status = %d",
				req, req->state);
#ifdef TW_OSL_DEBUG
			tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
			tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
			tw_cl_reset_ctlr(ctlr->ctlr_handle);
			return(TW_OSL_EIO);
		}

		/*
		 * Remove the request from the busy queue, mark it as complete,
		 * and enqueue it in the complete queue.
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_BUSY_Q);
		req->state = TW_CLI_REQ_STATE_COMPLETE;
		tw_cli_req_q_insert_tail(req, TW_CLI_COMPLETE_Q);

#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		/*
		 * intr_lock is released across the callback — presumably so
		 * the callback can itself take locks or submit new requests
		 * without deadlocking; NOTE(review): confirm against OSL
		 * locking rules.
		 */
		tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
		/* Call the CL internal callback, if there's one. */
		if (req->tw_cli_callback)
			req->tw_cli_callback(req);
		tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock);
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
	}

	/* Unmask the response interrupt. */
	TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
		TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT);

	tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);

#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	/* Complete this, and other requests in the complete queue. */
	tw_cli_process_complete_queue(ctlr);
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	return(error);
}
347
348
349
350 /*
351 * Function name: tw_cli_submit_pending_queue
352 * Description: Kick starts any requests in the pending queue.
353 *
354 * Input: ctlr -- ptr to CL internal ctlr context
355 * Output: None
356 * Return value: 0 -- all pending requests submitted successfully
357 * non-zero-- otherwise
358 */
359 TW_INT32
360 tw_cli_submit_pending_queue(struct tw_cli_ctlr_context *ctlr)
361 {
362 struct tw_cli_req_context *req;
363 TW_INT32 error = TW_OSL_ESUCCESS;
364
365 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
366
367 /*
368 * Pull requests off the pending queue, and submit them.
369 */
370 while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
371 TW_CL_NULL) {
372 if ((error = tw_cli_submit_cmd(req))) {
373 if (error == TW_OSL_EBUSY) {
374 tw_cli_dbg_printf(2, ctlr->ctlr_handle,
375 tw_osl_cur_func(),
376 "Requeueing pending request");
377 req->state = TW_CLI_REQ_STATE_PENDING;
378 /*
379 * Queue the request at the head of the pending
380 * queue, and break away, so we don't try to
381 * submit any more requests.
382 */
383 tw_cli_req_q_insert_head(req, TW_CLI_PENDING_Q);
384 break;
385 } else {
386 tw_cl_create_event(ctlr->ctlr_handle,
387 TW_CL_FALSE,
388 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
389 0x1202, 0x1,
390 TW_CL_SEVERITY_ERROR_STRING,
391 "Could not start request "
392 "in pending queue",
393 "request = %p, opcode = 0x%x, "
394 "error = %d", req,
395 GET_OPCODE(req->cmd_pkt->
396 command.cmd_pkt_9k.res__opcode),
397 error);
398 /*
399 * Set the appropriate error and call the CL
400 * internal callback if there's one. If the
401 * request originator is polling for completion,
402 * he should be checking req->error to
403 * determine that the request did not go
404 * through. The request originators are
405 * responsible for the clean-up.
406 */
407 req->error_code = error;
408 req->state = TW_CLI_REQ_STATE_COMPLETE;
409 if (req->tw_cli_callback)
410 req->tw_cli_callback(req);
411 error = TW_OSL_ESUCCESS;
412 }
413 }
414 }
415 return(error);
416 }
417
418
419
420 /*
421 * Function name: tw_cli_process_complete_queue
422 * Description: Calls the CL internal callback routine, if any, for
423 * each request in the complete queue.
424 *
425 * Input: ctlr -- ptr to CL internal ctlr context
426 * Output: None
427 * Return value: None
428 */
429 TW_VOID
430 tw_cli_process_complete_queue(struct tw_cli_ctlr_context *ctlr)
431 {
432 struct tw_cli_req_context *req;
433
434 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
435
436 /*
437 * Pull commands off the completed list, dispatch them appropriately.
438 */
439 while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_COMPLETE_Q)) !=
440 TW_CL_NULL) {
441 /* Call the CL internal callback, if there's one. */
442 if (req->tw_cli_callback)
443 req->tw_cli_callback(req);
444 }
445 }
446
447
448
449 /*
450 * Function name: tw_cli_complete_io
451 * Description: CL internal callback for SCSI/fw passthru requests.
452 *
453 * Input: req -- ptr to CL internal request context
454 * Output: None
455 * Return value: None
456 */
457 TW_VOID
458 tw_cli_complete_io(struct tw_cli_req_context *req)
459 {
460 struct tw_cli_ctlr_context *ctlr = req->ctlr;
461 struct tw_cl_req_packet *req_pkt =
462 (struct tw_cl_req_packet *)(req->orig_req);
463
464 tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
465
466 req_pkt->status = TW_CL_ERR_REQ_SUCCESS;
467 if (req->error_code) {
468 req_pkt->status = TW_CL_ERR_REQ_UNABLE_TO_SUBMIT_COMMAND;
469 goto out;
470 }
471
472 if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
473 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
474 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
475 0x1203, 0x1, TW_CL_SEVERITY_ERROR_STRING,
476 "I/O completion on incomplete command!!",
477 "request = %p, status = %d",
478 req, req->state);
479 #ifdef TW_OSL_DEBUG
480 tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
481 #endif /* TW_OSL_DEBUG */
482 tw_cl_reset_ctlr(ctlr->ctlr_handle);
483 req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
484 goto out;
485 }
486
487 if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
488 /* Copy the command packet back into OSL's space. */
489 tw_osl_memcpy(req_pkt->gen_req_pkt.pt_req.cmd_pkt, req->cmd_pkt,
490 sizeof(struct tw_cl_command_packet));
491 } else
492 tw_cli_scsi_complete(req);
493
494 out:
495 req_pkt->tw_osl_callback(req->req_handle);
496 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
497 }
498
499
500
501 /*
502 * Function name: tw_cli_scsi_complete
503 * Description: Completion routine for SCSI requests.
504 *
505 * Input: req -- ptr to CL internal request context
506 * Output: None
507 * Return value: None
508 */
TW_VOID
tw_cli_scsi_complete(struct tw_cli_req_context *req)
{
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(req->orig_req);
	struct tw_cl_scsi_req_packet	*scsi_req =
		&(req_pkt->gen_req_pkt.scsi_req);
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			error;
	TW_UINT8			*cdb;

	tw_cli_dbg_printf(8, req->ctlr->ctlr_handle, tw_osl_cur_func(),
		"entered");

	/* Propagate the fw's SCSI status; zero means success — we're done. */
	scsi_req->scsi_status = cmd->status;
	if (! cmd->status)
		return;

	tw_cli_dbg_printf(1, req->ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), cmd->status);

	/* Classify the failure from the fw's status block. */
	cmd_hdr = &(req->cmd_pkt->cmd_hdr);
	error = cmd_hdr->status_block.error;
	if ((error == TWA_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) ||
		(error == TWA_ERROR_UNIT_OFFLINE)) {
		/* Non-zero LUN means the LUN was bad, else the target. */
		if (GET_LUN_L4(cmd->lun_l4__req_id))
			req_pkt->status |= TW_CL_ERR_REQ_INVALID_LUN;
		else
			req_pkt->status |= TW_CL_ERR_REQ_INVALID_TARGET;
	} else {
		/* Dump the failed 9k command packet fields... */
		tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
			tw_osl_cur_func(),
			"cmd = %x %x %x %x %x %x %x",
			GET_OPCODE(cmd->res__opcode),
			GET_SGL_OFF(cmd->res__opcode),
			cmd->unit,
			cmd->lun_l4__req_id,
			cmd->status,
			cmd->sgl_offset,
			cmd->lun_h4__sgl_entries);

		/* ... and all 16 bytes of the CDB. */
		cdb = (TW_UINT8 *)(cmd->cdb);
		tw_cli_dbg_printf(2, req->ctlr->ctlr_handle,
			tw_osl_cur_func(),
			"cdb = %x %x %x %x %x %x %x %x "
			"%x %x %x %x %x %x %x %x",
			cdb[0], cdb[1], cdb[2], cdb[3],
			cdb[4], cdb[5], cdb[6], cdb[7],
			cdb[8], cdb[9], cdb[10], cdb[11],
			cdb[12], cdb[13], cdb[14], cdb[15]);

		/*
		 * Print the error.  Firmware doesn't yet support
		 * the 'Mode Sense' cmd.  Don't print if the cmd
		 * is 'Mode Sense', and the error is 'Invalid field
		 * in CDB'.  (0x1A is the MODE SENSE(6) opcode; 0x10D is the
		 * fw's 'Invalid field in CDB' error code.)
		 */
		if (! ((cdb[0] == 0x1A) && (error == 0x10D)))
			tw_cli_create_ctlr_event(req->ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
	}

	/* Hand back auto-sense data, if the OSL provided a buffer for it. */
	if (scsi_req->sense_data) {
		tw_osl_memcpy(scsi_req->sense_data, cmd_hdr->sense_data,
			TWA_SENSE_DATA_LENGTH);
		scsi_req->sense_len = TWA_SENSE_DATA_LENGTH;
		req_pkt->status |= TW_CL_ERR_REQ_AUTO_SENSE_VALID;
	}
	req_pkt->status |= TW_CL_ERR_REQ_SCSI_ERROR;
}
583
584
585
586 /*
587 * Function name: tw_cli_param_callback
588 * Description: Callback for get/set_param requests.
589 *
590 * Input: req -- ptr to completed request pkt
591 * Output: None
592 * Return value: None
593 */
594 TW_VOID
595 tw_cli_param_callback(struct tw_cli_req_context *req)
596 {
597 struct tw_cli_ctlr_context *ctlr = req->ctlr;
598 union tw_cl_command_7k *cmd =
599 &(req->cmd_pkt->command.cmd_pkt_7k);
600 TW_INT32 error;
601
602 tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
603
604 /*
605 * If the request was never submitted to the controller, the function
606 * that sets req->error is responsible for calling tw_cl_create_event.
607 */
608 if (! req->error_code)
609 if (cmd->param.status) {
610 tw_cli_create_ctlr_event(ctlr,
611 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
612 &(req->cmd_pkt->cmd_hdr));
613 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
614 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
615 0x1204, 0x1, TW_CL_SEVERITY_ERROR_STRING,
616 "get/set_param failed",
617 "status = %d", cmd->param.status);
618 }
619
620 ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
621 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
622
623 if ((ctlr->state & TW_CLI_CTLR_STATE_GET_MORE_AENS) &&
624 (!(ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS))) {
625 ctlr->state &= ~TW_CLI_CTLR_STATE_GET_MORE_AENS;
626 tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
627 "Fetching more AEN's");
628 if ((error = tw_cli_get_aen(ctlr)))
629 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
630 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
631 0x1205, 0x1, TW_CL_SEVERITY_ERROR_STRING,
632 "Failed to fetch all AEN's from param_callback",
633 "error = %d", error);
634 }
635 }
636
637
638
639 /*
640 * Function name: tw_cli_aen_callback
641 * Description: Callback for requests to fetch AEN's.
642 *
643 * Input: req -- ptr to completed request pkt
644 * Output: None
645 * Return value: None
646 */
TW_VOID
tw_cli_aen_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_command_header	*cmd_hdr;
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	TW_UINT16			aen_code = TWA_AEN_QUEUE_EMPTY;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, req error = %d, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), req->error_code, cmd->status);

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets error is responsible for calling tw_cl_create_event.
	 * Here we only report failures of requests the fw actually saw.
	 */
	if (!(error = req->error_code))
		if ((error = cmd->status)) {
			cmd_hdr = (struct tw_cl_command_header *)
				(&(req->cmd_pkt->cmd_hdr));
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1206, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Request Sense failed",
				"opcode = 0x%x, status = %d",
				GET_OPCODE(cmd->res__opcode), cmd->status);
		}

	/* On any error, free the internal request and give up. */
	if (error) {
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		return;
	}

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"Request Sense command succeeded");

	aen_code = tw_cli_manage_aen(ctlr, req);

	/*
	 * For TWA_AEN_SYNC_TIME_WITH_HOST, tw_cli_manage_aen has already
	 * freed the request (it needed it for set_param), so we must not
	 * touch it again.  Otherwise free it here, and — unless the fw's AEN
	 * queue is empty — keep fetching AEN's.
	 */
	if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		if (aen_code != TWA_AEN_QUEUE_EMPTY)
			if ((error = tw_cli_get_aen(ctlr)))
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1207, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Failed to fetch all AEN's",
					"error = %d", error);
	}
}
707
708
709
710 /*
711 * Function name: tw_cli_manage_aen
712 * Description: Handles AEN's.
713 *
714 * Input: ctlr -- ptr to CL internal ctlr context
715 * req -- ptr to CL internal request context
716 * Output: None
 * Return value:	Code of the retrieved AEN (e.g. TWA_AEN_QUEUE_EMPTY,
 *			TWA_AEN_SYNC_TIME_WITH_HOST)
718 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
	struct tw_cli_req_context *req)
{
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			aen_code;
	TW_TIME				local_time;
	TW_TIME				sync_time;
	TW_UINT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* The AEN code arrives in the error field of the status block. */
	cmd_hdr = (struct tw_cl_command_header *)(req->data);
	aen_code = cmd_hdr->status_block.error;

	switch (aen_code) {
	case TWA_AEN_SYNC_TIME_WITH_HOST:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Received AEN_SYNC_TIME");
		/*
		 * Free the internal req pkt right here, since
		 * tw_cli_set_param will need it.  The caller
		 * (tw_cli_aen_callback) knows not to free it again for this
		 * AEN code.
		 */
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

		/*
		 * We will use a callback in tw_cli_set_param only when
		 * interrupts are enabled and we can expect our callback
		 * to get called.  Setting the TW_CLI_CTLR_STATE_GET_MORE_AENS
		 * flag will make the callback continue to try to retrieve
		 * more AEN's.
		 */
		if (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
			ctlr->state |= TW_CLI_CTLR_STATE_GET_MORE_AENS;
		/*
		 * Calculate time (in seconds) since last Sunday 12.00 AM.
		 * 86400 s/day, 604800 s/week; the 3-day offset assumes
		 * local_time counts from a Unix-style epoch, which fell on a
		 * Thursday — TODO confirm tw_osl_get_local_time's epoch.
		 */
		local_time = tw_osl_get_local_time();
		sync_time = (local_time - (3 * 86400)) % 604800;
		if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
				TWA_PARAM_TIME_SCHED_TIME, 4,
				&sync_time,
				(ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
				? tw_cli_param_callback : TW_CL_NULL)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unable to sync time with ctlr",
				"error = %d", error);

		break;


	case TWA_AEN_QUEUE_EMPTY:
		/* No more AEN's to fetch; nothing to do. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"AEN queue empty");
		break;


	default:
		/* Queue the event. */

		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Queueing AEN");
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
			cmd_hdr);
		break;
	} /* switch */
	return(aen_code);
}
789
790
791
792 /*
793 * Function name: tw_cli_enable_interrupts
794 * Description: Enables interrupts on the controller
795 *
796 * Input: ctlr -- ptr to CL internal ctlr context
797 * Output: None
798 * Return value: None
799 */
800 TW_VOID
801 tw_cli_enable_interrupts(struct tw_cli_ctlr_context *ctlr)
802 {
803 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
804
805 ctlr->state |= TW_CLI_CTLR_STATE_INTR_ENABLED;
806 TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
807 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
808 TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
809 TWA_CONTROL_ENABLE_INTERRUPTS);
810 }
811
812
813
814 /*
 * Function name:	tw_cli_disable_interrupts
816 * Description: Disables interrupts on the controller
817 *
818 * Input: ctlr -- ptr to CL internal ctlr context
819 * Output: None
820 * Return value: None
821 */
822 TW_VOID
823 tw_cli_disable_interrupts(struct tw_cli_ctlr_context *ctlr)
824 {
825 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
826
827 TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
828 TWA_CONTROL_DISABLE_INTERRUPTS);
829 ctlr->state &= ~TW_CLI_CTLR_STATE_INTR_ENABLED;
830 }
831
Cache object: ae654b83d968708cedb03c16a2f430a4
|