FreeBSD/Linux Kernel Cross Reference
sys/dev/twa/tw_cl_io.c
/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"

/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	struct tw_cl_command_9k		*cmd;
	struct tw_cl_scsi_req_packet	*scsi_req;
	TW_INT32			error;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"I/O during reset: returning busy. Ctlr state = 0x%x",
			ctlr->state);
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
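
/*
 * Example (illustrative only, not part of this file): a minimal OSL-side
 * caller of tw_cl_start_io().  The names osl_softc, osl_req and
 * osl_complete below are hypothetical; a real OSL builds the request
 * packet from its own I/O structures.
 *
 *	static TW_VOID
 *	osl_submit_io(struct osl_softc *sc, struct osl_req *r)
 *	{
 *		r->req_pkt.tw_osl_callback = osl_complete;
 *		r->req_pkt.gen_req_pkt.scsi_req.unit = r->unit;
 *		(TW_VOID)tw_cl_start_io(&(sc->ctlr_handle),
 *			&(r->req_pkt), &(r->req_handle));
 *	}
 *
 * If tw_cl_start_io returns TW_OSL_EBUSY, tw_osl_ctlr_busy() has already
 * been called for the request, and the OSL is expected to retry later.
 */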


/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error;
	TW_UINT8			notify_osl_of_ctlr_busy = TW_CL_FALSE;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* On 9K_E/9K_SA controllers, write the low 4 bytes first. */
	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
		tw_osl_write_reg(ctlr_handle,
			TWA_COMMAND_QUEUE_OFFSET_LOW,
			(TW_UINT32)(req->cmd_pkt_phys +
			sizeof(struct tw_cl_command_header)), 4);

	/* Check to see if we can post a command. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
		goto out;

	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL) ||
		    ((req_pkt) &&
		    (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				error = 0;
			} else
				error = TW_OSL_EBUSY;
		} else {
			notify_osl_of_ctlr_busy = TW_CL_TRUE;
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue. */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
			/* Now write the high 4 bytes. */
			tw_osl_write_reg(ctlr_handle,
				TWA_COMMAND_QUEUE_OFFSET_HIGH,
				(TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys +
				sizeof(struct tw_cl_command_header)))>>32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/* First write the low 4 bytes, then the high 4. */
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET_LOW,
					(TW_UINT32)(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)), 4);
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET_HIGH,
					(TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)))>>32), 4);
			} else
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET,
					(TW_UINT32)(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)), 4);
		}
	}
out:
	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		if (notify_osl_of_ctlr_busy)
			tw_osl_ctlr_busy(ctlr_handle, req->req_handle);

		/*
		 * Synchronize access between writes to command and control
		 * registers in 64-bit environments, on G66.
		 */
		if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
			tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

		/* Unmask command interrupt. */
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);

		if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
	}

	return(error);
}
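
/*
 * Note on the queue-offset writes above (illustrative): with 64-bit
 * addressing, the physical address of the command (past its header) is
 * split into two 32-bit register writes.  For example, if
 * cmd_pkt_phys + sizeof(struct tw_cl_command_header) were 0x123456780:
 *
 *	TW_UINT64	pa = 0x123456780ULL;
 *	TW_UINT32	low = (TW_UINT32)pa;		(0x23456780)
 *	TW_UINT32	high = (TW_UINT32)(pa >> 32);	(0x00000001)
 *
 * On the 9K_E/9K_SA controllers the low half is written before the
 * queue-full check, and the high half only once the command has been
 * accepted into the busy queue.
 */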


/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Passthru request during reset: returning busy. "
			"Ctlr state = 0x%x",
			ctlr->state);
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/* Build the cmd pkt. */
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
			== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
				sgl = (((TW_UINT32 *)cmd_7k) +
					cmd_7k->generic.size);
			else
				sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
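
/*
 * Note on the 7K SGL arithmetic above (illustrative): generic.size is
 * counted in 32-bit words, and each appended SG descriptor costs 2 words
 * (32-bit address + length) or 3 words (64-bit address + length).  So a
 * passthru command carrying 4 SG entries on a 64-bit controller grows by
 * 4 * 3 = 12 words:
 *
 *	cmd_7k->generic.size += pt_req->sgl_entries *
 *	    ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
 */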


/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr_handle	-- controller handle
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   need to be copied to the output buffer in
 *				   user-space
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;
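
		/*
		 * Worked example of the index arithmetic above (illustrative
		 * numbers): with max_aens_supported = 256, start_index = 10,
		 * sequence_id 100 stored at start_index, and a caller that
		 * last saw sequence_id 104, the next event is at
		 *	(10 + 104 - 100 + 1) % 256 = 15,
		 * i.e. the slot holding sequence_id 105.
		 */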

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (!(ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs were missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (!(ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}

	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;

	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
		comp_pkt.driver_branch_high =
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
		comp_pkt.driver_build_high =
			TWA_CURRENT_FW_BUILD(ctlr->arch_id);
		comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
		comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
		comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
		comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
		comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
		comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown ioctl cmd. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%x", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}
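
/*
 * Example (illustrative only): a hypothetical OSL ioctl entry point that
 * feeds this handler.  The copyin/copyout steps and osl_softc are
 * assumptions; real OSLs also deal with variable-length data_buf
 * payloads rather than a fixed-size packet.
 *
 *	struct tw_cl_ioctl_packet	k_buf;
 *
 *	copyin(user_arg, &k_buf, sizeof(k_buf));
 *	error = tw_cl_ioctl(&(sc->ctlr_handle), cmd, &k_buf);
 *	if (!error)
 *		copyout(&k_buf, user_arg, sizeof(k_buf));
 */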


/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be
 *					   called back on completion;
 *					   TW_CL_NULL if no callback.
 * Output:		param_data	-- param value
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to read data into. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}
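
/*
 * Typical synchronous use (illustrative; SOME_TABLE_ID and SOME_PARAM_ID
 * are hypothetical placeholders, not real firmware constants):
 *
 *	TW_UINT16	state;
 *
 *	if (tw_cli_get_param(ctlr, SOME_TABLE_ID, SOME_PARAM_ID,
 *		&state, sizeof(state), TW_CL_NULL) == 0)
 *		(state now holds the parameter value)
 *
 * With a non-NULL callback the routine returns as soon as the command is
 * submitted, and the callback must read the data out of req->data.
 */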


/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			data		-- ptr to the parameter value to set
 *			callback	-- ptr to function, if any, to be
 *					   called back on completion;
 *					   TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to send data using. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
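
/*
 * The set side mirrors the example after tw_cli_get_param (hypothetical
 * ids again; any byte swapping of the value itself is parameter
 * specific):
 *
 *	TW_UINT16	value = 1;
 *
 *	error = tw_cli_set_param(ctlr, SOME_TABLE_ID, SOME_PARAM_ID,
 *		sizeof(value), &value, TW_CL_NULL);
 */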


/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * to NOT clean up when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state != TW_CLI_REQ_STATE_PENDING)
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
	else {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against
		 * tw_cli_drain_pending_queue taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}
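
/*
 * Caller contract, summarized (see the comments above): on
 * TW_OSL_ETIMEDOUT the request has already been cleaned up here (or a
 * reset is handling it), so callers must not free it again:
 *
 *	error = tw_cli_submit_and_poll_request(req,
 *		TW_CLI_REQUEST_TIMEOUT_PERIOD);
 *	if (error == TW_OSL_ETIMEDOUT)
 *		return(error);		(no clean-up here)
 *	if (error)
 *		goto out;		(caller frees req)
 */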


/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_INT32			reset_attempt = 1;
	TW_INT32			error;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
		"Draining all queues following reset");
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);

	tw_cli_disable_interrupts(ctlr);

	/* Soft reset the controller. */
try_reset:
	if ((error = tw_cli_soft_reset(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller reset failed",
			"error = %d; attempt %d", error, reset_attempt++);
		if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
			goto try_reset;
		else
			goto out;
	}

	/* Re-establish logical connection with the controller. */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
			TW_CL_NULL, TW_CL_NULL))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't initialize connection after reset",
			"error = %d", error);
		goto out;
	}

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Controller reset done!",
		" ");

out:
	ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
	/*
	 * Enable interrupts, and also clear attention and response interrupts.
	 */
	tw_cli_enable_interrupts(ctlr);

	/* Request a bus re-scan. */
	if (!error)
		tw_osl_scan_bus(ctlr_handle);
	return(error);
}


/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
		while (tw_cli_find_response(ctlr,
			TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) !=
				TW_OSL_ESUCCESS)
			tw_osl_delay(10);
		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
	}

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);

	if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
	    (error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller errors detected after reset",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a SCSI cmd to fw.
 *
 * Input:		req	-- ptr to request pkt
 *			cmd	-- opcode of SCSI cmd to send
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
		return(TW_OSL_EBUSY);
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

	cmd9k->cdb[0] = (TW_UINT8)cmd;	/* SCSI opcode */
	cmd9k->cdb[4] = 128;		/* allocation length */

	if ((error = tw_cli_submit_cmd(req))) {
		if (error != TW_OSL_EBUSY) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Failed to start SCSI command. "
				"request = %p, error = %d", req, error);
			return(TW_OSL_EIO);
		}
	}
	return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command. "
			"request = %p, error = %d", req, error);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}


/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			sgl_src	-- ptr to the source (OSL supplied) SG list
 *			sgl_dest	-- ptr to the SG list in the cmd pkt
 *			num_sgl_entries	-- # of SG list entries
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH) {
				/*
				 * The source entry carries a 64-bit length
				 * field; skip its extra 4 bytes.
				 */
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
			}
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}
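
/*
 * Descriptor sizes, for reference (illustrative): a tw_cl_sg_desc32
 * entry is 8 bytes (4 address + 4 length) and a tw_cl_sg_desc64 entry is
 * 12 bytes (8 + 4).  With TW_CL_64BIT_SG_LENGTH the OSL-side source
 * entries carry a 64-bit length field, i.e. 16 bytes per entry, which is
 * why sgl_s is stepped an extra 4 bytes per iteration in the 64-bit loop
 * above.
 */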