1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/6.2/sys/dev/twa/tw_cl_init.c 153207 2005-12-07 18:18:06Z vkashyap $
28 */
29
30 /*
31 * AMCC'S 3ware driver for 9000 series storage controllers.
32 *
33 * Author: Vinod Kashyap
34 */
35
36
37 /*
38 * Common Layer initialization functions.
39 */
40
41
42 #include "tw_osl_share.h"
43 #include "tw_cl_share.h"
44 #include "tw_cl_fwif.h"
45 #include "tw_cl_ioctl.h"
46 #include "tw_cl.h"
47 #include "tw_cl_externs.h"
48 #include "tw_osl_ioctl.h"
49
50
51 /*
52 * Function name: tw_cl_ctlr_supported
53 * Description: Determines if a controller is supported.
54 *
55 * Input: vendor_id -- vendor id of the controller
56 * device_id -- device id of the controller
57 * Output: None
58 * Return value: TW_CL_TRUE-- controller supported
59 * TW_CL_FALSE-- controller not supported
60 */
61 TW_INT32
62 tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id)
63 {
64 if ((vendor_id == TW_CL_VENDOR_ID) &&
65 ((device_id == TW_CL_DEVICE_ID_9K) ||
66 (device_id == TW_CL_DEVICE_ID_9K_X)))
67 return(TW_CL_TRUE);
68 return(TW_CL_FALSE);
69 }
70
71
72
73 /*
74 * Function name: tw_cl_get_pci_bar_info
75 * Description: Returns PCI BAR info.
76 *
77 * Input: device_id -- device id of the controller
78 * bar_type -- type of PCI BAR in question
79 * Output: bar_num -- PCI BAR number corresponding to bar_type
80 * bar0_offset -- byte offset from BAR 0 (0x10 in
81 * PCI config space)
82 * bar_size -- size, in bytes, of the BAR in question
83 * Return value: 0 -- success
84 * non-zero -- failure
85 */
86 TW_INT32
87 tw_cl_get_pci_bar_info(TW_INT32 device_id, TW_INT32 bar_type,
88 TW_INT32 *bar_num, TW_INT32 *bar0_offset, TW_INT32 *bar_size)
89 {
90 TW_INT32 error = TW_OSL_ESUCCESS;
91
92 switch(device_id) {
93 case TW_CL_DEVICE_ID_9K:
94 switch(bar_type) {
95 case TW_CL_BAR_TYPE_IO:
96 *bar_num = 0;
97 *bar0_offset = 0;
98 *bar_size = 4;
99 break;
100
101 case TW_CL_BAR_TYPE_MEM:
102 *bar_num = 1;
103 *bar0_offset = 0x4;
104 *bar_size = 8;
105 break;
106
107 case TW_CL_BAR_TYPE_SBUF:
108 *bar_num = 2;
109 *bar0_offset = 0xC;
110 *bar_size = 8;
111 break;
112 }
113 break;
114
115 case TW_CL_DEVICE_ID_9K_X:
116 switch(bar_type) {
117 case TW_CL_BAR_TYPE_IO:
118 *bar_num = 2;
119 *bar0_offset = 0x10;
120 *bar_size = 4;
121 break;
122
123 case TW_CL_BAR_TYPE_MEM:
124 *bar_num = 1;
125 *bar0_offset = 0x8;
126 *bar_size = 8;
127 break;
128
129 case TW_CL_BAR_TYPE_SBUF:
130 *bar_num = 0;
131 *bar0_offset = 0;
132 *bar_size = 8;
133 break;
134 }
135 break;
136
137 default:
138 error = TW_OSL_ENOTTY;
139 break;
140 }
141
142 return(error);
143 }
144
145
146
147 /*
148 * Function name: tw_cl_get_mem_requirements
149 * Description: Provides info about Common Layer requirements for a
150 * controller, given the controller type (in 'flags').
151 * Input: ctlr_handle -- controller handle
152 * flags -- more info passed by the OS Layer
153 * device_id -- device id of the controller
154 * max_simult_reqs -- maximum # of simultaneous
155 * requests that the OS Layer expects
156 * the Common Layer to support
 *		max_aens -- maximum # of AEN's needed to be supported
158 * Output: alignment -- alignment needed for all DMA'able
159 * buffers
160 * sg_size_factor -- every SG element should have a size
161 * that's a multiple of this number
162 * non_dma_mem_size -- # of bytes of memory needed for
163 * non-DMA purposes
164 * dma_mem_size -- # of bytes of DMA'able memory needed
165 * flash_dma_mem_size -- # of bytes of DMA'able memory
166 * needed for firmware flash, if applicable
167 * per_req_dma_mem_size -- # of bytes of DMA'able memory
168 * needed per request, if applicable
169 * per_req_non_dma_mem_size -- # of bytes of memory needed
170 * per request for non-DMA purposes,
171 * if applicable
 *
173 * Return value: 0 -- success
174 * non-zero-- failure
175 */
TW_INT32
tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT32 flags, TW_INT32 device_id, TW_INT32 max_simult_reqs,
	TW_INT32 max_aens, TW_UINT32 *alignment, TW_UINT32 *sg_size_factor,
	TW_UINT32 *non_dma_mem_size, TW_UINT32 *dma_mem_size
#ifdef TW_OSL_FLASH_FIRMWARE
	, TW_UINT32 *flash_dma_mem_size
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	, TW_UINT32 *per_req_dma_mem_size
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	, TW_UINT32 *per_req_non_dma_mem_size
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
	)
{
	/* Treat an unspecified device id as the default (9K) controller. */
	if (device_id == 0)
		device_id = TW_CL_DEVICE_ID_9K;

	/* The OSL may not ask for more requests than the CL can track. */
	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	/* DMA alignment and SG element size restrictions are per-chip. */
	*alignment = TWA_ALIGNMENT(device_id);
	*sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);

	/*
	 * Total non-DMA memory needed is the sum total of memory needed for
	 * the controller context, request packets (including the 1 needed for
	 * CL internal requests), and event packets.
	 */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Per-request allocation mode: only 1 request context (for CL
	 * internal use) is carved out up front; the OSL allocates
	 * per_req_non_dma_mem_size bytes for each further request.
	 */
	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context)) +
		(sizeof(struct tw_cl_event_packet) * max_aens);
	*per_req_non_dma_mem_size = sizeof(struct tw_cli_req_context);

#else /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	/* One-shot mode: all request contexts (+1 internal) up front. */
	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens);

#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	/*
	 * Total DMA'able memory needed is the sum total of memory needed for
	 * all command packets (including the 1 needed for CL internal
	 * requests), and memory needed to hold the payload for internal
	 * requests.
	 */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	*dma_mem_size = sizeof(struct tw_cl_command_packet) +
		TW_CLI_SECTOR_SIZE;
	*per_req_dma_mem_size = sizeof(struct tw_cl_command_packet);

#else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	*dma_mem_size = (sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) + (TW_CLI_SECTOR_SIZE);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */


#ifdef TW_OSL_FLASH_FIRMWARE

	/*
	 * Memory needed to hold the firmware image while flashing: one
	 * chunk of the image, rounded up to a 512-byte multiple (the same
	 * rounding tw_cli_flash_firmware applies per chunk).
	 */
	*flash_dma_mem_size =
		((tw_cli_fw_img_size / TW_CLI_NUM_FW_IMAGE_CHUNKS) +
		511) & ~511;
	/* (TWA_SG_ELEMENT_SIZE_FACTOR(device_id) - 1)) &
		~(TWA_SG_ELEMENT_SIZE_FACTOR(device_id) - 1); */

#endif /* TW_OSL_FLASH_FIRMWARE */

	return(0);
}
262
263
264
265 /*
266 * Function name: tw_cl_init_ctlr
267 * Description: Initializes driver data structures for the controller.
268 *
269 * Input: ctlr_handle -- controller handle
270 * flags -- more info passed by the OS Layer
271 * device_id -- device id of the controller
272 * max_simult_reqs -- maximum # of simultaneous requests
273 * that the OS Layer expects the Common
274 * Layer to support
 *		max_aens -- maximum # of AEN's needed to be supported
276 * non_dma_mem -- ptr to allocated non-DMA memory
277 * dma_mem -- ptr to allocated DMA'able memory
278 * dma_mem_phys -- physical address of dma_mem
279 * flash_dma_mem -- ptr to allocated DMA'able memory
280 * needed for firmware flash, if applicable
281 * flash_dma_mem_phys -- physical address of flash_dma_mem
282 * Output: None
283 * Return value: 0 -- success
284 * non-zero-- failure
285 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys
#ifdef TW_OSL_FLASH_FIRMWARE
	, TW_VOID *flash_dma_mem,
	TW_UINT64 flash_dma_mem_phys
#endif /* TW_OSL_FLASH_FIRMWARE */
	)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * On a (re)start-only request, the controller context already
	 * exists; skip all memory carving and jump straight to bring-up.
	 */
	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	/* Same sanity limit as tw_cl_get_mem_requirements. */
	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	/*
	 * The OSL must have handed us every buffer we asked for in
	 * tw_cl_get_mem_requirements (flash memory only if flashing).
	 */
	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)
#ifdef TW_OSL_FLASH_FIRMWARE
		|| ((flags & TW_CL_FLASH_FIRMWARE) ?
		(flash_dma_mem == TW_CL_NULL) : TW_CL_FALSE)
#endif /* TW_OSL_FLASH_FIRMWARE */
		) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	/*
	 * Zero both memory pools.  Sizes mirror the formulas in
	 * tw_cl_get_mem_requirements for the corresponding build options.
	 */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		sizeof(struct tw_cli_req_context) +
		(sizeof(struct tw_cl_event_packet) * max_aens));
#else /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens));
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	tw_osl_memzero(dma_mem,
		sizeof(struct tw_cl_command_packet) +
		TW_CLI_SECTOR_SIZE);
#else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) +
		TW_CLI_SECTOR_SIZE);
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */


	/*
	 * Carve the non-DMA pool: controller context first, then the
	 * request context array, then (at the end) the AEN queue.
	 */
	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	/* Cross-link the OSL handle and the CL context. */
	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	/* +1 accounts for the CL's own internal request. */
	ctlr->max_simult_reqs = max_simult_reqs + 1;
	ctlr->max_aens_supported = max_aens;

#ifdef TW_OSL_FLASH_FIRMWARE
	ctlr->flash_dma_mem = flash_dma_mem;
	ctlr->flash_dma_mem_phys = flash_dma_mem_phys;
#endif /* TW_OSL_FLASH_FIRMWARE */

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
	/*
	 * If 64 bit cmd pkt addresses are used, we will need to serialize
	 * writes to the hardware (across registers), since existing (G66)
	 * hardware will get confused if, for example, we wrote the low 32 bits
	 * of the cmd pkt address, followed by a response interrupt mask to the
	 * control register, followed by the high 32 bits of the cmd pkt
	 * address.  It will then interpret the value written to the control
	 * register as the low cmd pkt address.  So, for this case, we will
	 * make a note that we will need to synchronize control register writes
	 * with command register writes.
	 */
	if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
		(ctlr->device_id == TW_CL_DEVICE_ID_9K)) {
		/* Share one lock so intr path serializes with the io path. */
		ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
		ctlr->intr_lock = ctlr->io_lock;
	} else {
		ctlr->intr_lock = &(ctlr->intr_lock_handle);
		tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
			ctlr->intr_lock);
	}

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		(
		/* Per-request builds pre-allocate only the 1 internal req. */
#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		1));

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	/* Internal-request payload lives right after the cmd pkt array. */
	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		(
#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		1));
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		(
#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		1));

	for (i = 0;
		i < (
#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		1); i++) {
		req = &(ctlr->req_ctxt_buf[i]);

#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

		/* Statically bind each request to its cmd pkt slot. */
		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

		req->request_id = i;
		req->ctlr = ctlr;

#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		/* The only pre-allocated request is the CL internal one. */
		req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}


#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Seed the pool of request ids (1..max_simult_reqs) handed to
	 * per-request allocations; id 0 is the internal request above.
	 * NOTE(review): i == 1 here, so head/tail start at 0 and
	 * free_req_ids[0..max_simult_reqs-1] get ids 1..max_simult_reqs.
	 */
	ctlr->free_req_head = i - 1;
	ctlr->free_req_tail = i - 1;

	for (; i < (max_simult_reqs + 1); i++)
		ctlr->free_req_ids[i - 1] = i;

	ctlr->num_free_req_ids = max_simult_reqs;

#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */


	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;


start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/*
	 * Initialize the controller.  On first failure, soft reset and
	 * retry exactly once before giving up.
	 */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller as active. */
	ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
	return(error);
}
519
520
521
522 #ifdef TW_OSL_FLASH_FIRMWARE
523 /*
524 * Function name: tw_cli_flash_firmware
525 * Description: Flashes bundled firmware image onto controller.
526 *
527 * Input: ctlr -- ptr to per ctlr structure
528 * Output: None
529 * Return value: 0 -- success
530 * non-zero-- failure
531 */
TW_INT32
tw_cli_flash_firmware(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_header		*cmd_hdr;
	struct tw_cl_command_download_firmware	*cmd;
	TW_UINT32				fw_img_chunk_size;
	TW_UINT32				num_chunks;
	TW_UINT32				this_chunk_size = 0;
	TW_INT32				remaining_img_size = 0;
	TW_INT32				hard_reset_needed = TW_CL_FALSE;
	TW_INT32				error = TW_OSL_EGENFAILURE;
	TW_UINT32				i;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL) {
		/* No free request packets available.  Can't proceed. */
		error = TW_OSL_EBUSY;
		goto out;
	}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/* Per-request builds: borrow the single internal cmd pkt slot. */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/*
	 * Determine amount of memory needed to hold a chunk of the
	 * firmware image.  As yet, the Download_Firmware command does not
	 * support SG elements that are ctlr->sg_size_factor multiples.  It
	 * requires them to be 512-byte multiples.
	 */
	fw_img_chunk_size = ((tw_cli_fw_img_size / TW_CLI_NUM_FW_IMAGE_CHUNKS) +
		511) & ~511;
	/* (ctlr->sg_size_factor - 1)) &
		~(ctlr->sg_size_factor - 1); */

	/* Calculate the actual number of chunks needed. */
	num_chunks = (tw_cli_fw_img_size / fw_img_chunk_size) +
		((tw_cli_fw_img_size % fw_img_chunk_size) ? 1 : 0);

	/* Chunks are staged through the OSL-provided flash DMA buffer. */
	req->data = ctlr->flash_dma_mem;
	req->data_phys = ctlr->flash_dma_mem_phys;

	remaining_img_size = tw_cli_fw_img_size;

	cmd_hdr = &(req->cmd_pkt->cmd_hdr);
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k.download_fw);

	/* Download the image one chunk at a time, polling each request. */
	for (i = 0; i < num_chunks; i++) {
		/* Build a cmd pkt for downloading firmware. */
		tw_osl_memzero(req->cmd_pkt,
			sizeof(struct tw_cl_command_packet));

		cmd_hdr->header_desc.size_header = 128;

		/* sgl_offset (offset in dwords, to sg list) is 2. */
		cmd->sgl_off__opcode =
			BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_DOWNLOAD_FIRMWARE);
		cmd->request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
		cmd->unit = 0;
		cmd->status = 0;
		cmd->flags = 0;
		cmd->param = TW_CL_SWAP16(8);	/* prom image */

		if (i != (num_chunks - 1))
			this_chunk_size = fw_img_chunk_size;
		else	/* last chunk */
			this_chunk_size = remaining_img_size;

		remaining_img_size -= this_chunk_size;

		/* Copy this chunk of the image into the DMA buffer. */
		tw_osl_memcpy(req->data, tw_cli_fw_img + (i * fw_img_chunk_size),
			this_chunk_size);

		/*
		 * The next line will effect only the last chunk.
		 * (Earlier chunks are already 512-byte multiples.)
		 */
		req->length = (this_chunk_size + 511) & ~511;
		/* (ctlr->sg_size_factor - 1)) &
			~(ctlr->sg_size_factor - 1); */

		/* Fill in the single SG element (64 or 32 bit format). */
		if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
			((struct tw_cl_sg_desc64 *)(cmd->sgl))[0].address =
				TW_CL_SWAP64(req->data_phys);
			((struct tw_cl_sg_desc64 *)(cmd->sgl))[0].length =
				TW_CL_SWAP32(req->length);
			cmd->size = 2 + 3;
		} else {
			((struct tw_cl_sg_desc32 *)(cmd->sgl))[0].address =
				TW_CL_SWAP32(req->data_phys);
			((struct tw_cl_sg_desc32 *)(cmd->sgl))[0].length =
				TW_CL_SWAP32(req->length);
			cmd->size = 2 + 2;
		}

		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error) {
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1005, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Firmware flash request could not be posted",
				"error = %d\n", error);
			if (error == TW_OSL_ETIMEDOUT)
				/* clean-up done by tw_cli_submit_and_poll_request */
				return(error);
			break;
		}
		error = cmd->status;

		/*
		 * The firmware acknowledges every chunk except the last
		 * with TWA_ERROR_MORE_DATA; anything else is a failure.
		 */
		if (((i == (num_chunks - 1)) && (error)) ||
			((i != (num_chunks - 1)) &&
			((error = cmd_hdr->status_block.error) !=
			TWA_ERROR_MORE_DATA))) {
			/*
			 * It's either that download of the last chunk
			 * failed, or the download of one of the earlier
			 * chunks failed with an error other than
			 * TWA_ERROR_MORE_DATA.  Report the error.
			 */
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1006, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Firmware flash failed",
				"cmd = 0x%x, chunk # %d, cmd status = %d",
				GET_OPCODE(cmd->sgl_off__opcode),
				i, cmd->status);
			/*
			 * Make a note to hard reset the controller,
			 * so that it doesn't wait for the remaining
			 * chunks.  Don't call the hard reset function
			 * right here, since we have committed to having
			 * only 1 active internal request at a time, and
			 * this request has not yet been freed.
			 */
			hard_reset_needed = TW_CL_TRUE;
			break;
		}
	} /* for */

out:
	/* Return the request before (possibly) resetting the controller. */
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

	if (hard_reset_needed)
		tw_cli_hard_reset(ctlr);

	return(error);
}
696
697
698
699 /*
700 * Function name: tw_cli_hard_reset
701 * Description: Hard resets the controller.
702 *
703 * Input: ctlr -- ptr to per ctlr structure
704 * Output: None
705 * Return value: 0 -- success
706 * non-zero-- failure
707 */
TW_INT32
tw_cli_hard_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_reset_firmware	*cmd;
	TW_INT32				error;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/* Per-request builds: borrow the single internal cmd pkt slot. */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build a cmd pkt for sending down the hard reset command. */
	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd = &(req->cmd_pkt->command.cmd_pkt_7k.reset_fw);
	cmd->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_HARD_RESET_FIRMWARE);
	cmd->size = 2;
	cmd->request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->unit = 0;
	cmd->status = 0;
	cmd->flags = 0;
	cmd->param = 0;	/* don't reload FPGA logic */

	/* The reset command carries no data payload. */
	req->data = TW_CL_NULL;
	req->length = 0;

	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1017, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Issuing hard (commanded) reset to the controller...",
		" ");

	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1007, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Hard reset request could not be posted",
			"error = %d", error);
		if (error == TW_OSL_ETIMEDOUT)
			/* clean-up done by tw_cli_submit_and_poll_request */
			return(error);
		goto out;
	}
	if ((error = cmd->status)) {
		/* Firmware rejected the reset command; log both views. */
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1008, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Hard reset request failed",
			"error = %d", error);
	}

	if (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
		while (tw_cli_find_response(ctlr,
			TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS)
			tw_osl_delay(10);
		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
	}

	/* Wait for the MC_RDY bit to get set. */
	if ((error = tw_cli_poll_status(ctlr, TWA_STATUS_MICROCONTROLLER_READY,
		TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1018, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready following hard reset",
			"error = %d", error);
	}

out:
	/* Return the request to the free queue on every exit path. */
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
817
818 #endif /* TW_OSL_FLASH_FIRMWARE */
819
820
821
822 /*
823 * Function name: tw_cli_start_ctlr
824 * Description: Establishes a logical connection with the controller.
825 * If bundled with firmware, determines whether or not
826 * to flash firmware, based on arch_id, fw SRL (Spec.
827 * Revision Level), branch & build #'s. Also determines
828 * whether or not the driver is compatible with the
829 * firmware on the controller, before proceeding to work
830 * with it.
831 *
832 * Input: ctlr -- ptr to per ctlr structure
833 * Output: None
834 * Return value: 0 -- success
835 * non-zero-- failure
836 */
837 TW_INT32
838 tw_cli_start_ctlr(struct tw_cli_ctlr_context *ctlr)
839 {
840 TW_UINT16 fw_on_ctlr_srl = 0;
841 TW_UINT16 fw_on_ctlr_arch_id = 0;
842 TW_UINT16 fw_on_ctlr_branch = 0;
843 TW_UINT16 fw_on_ctlr_build = 0;
844 TW_UINT32 init_connect_result = 0;
845 TW_INT32 error = TW_OSL_ESUCCESS;
846 #ifdef TW_OSL_FLASH_FIRMWARE
847 TW_INT8 fw_flashed = TW_CL_FALSE;
848 TW_INT8 fw_flash_failed = TW_CL_FALSE;
849 #endif /* TW_OSL_FLASH_FIRMWARE */
850
851 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
852
853 /* Wait for the controller to become ready. */
854 if ((error = tw_cli_poll_status(ctlr,
855 TWA_STATUS_MICROCONTROLLER_READY,
856 TW_CLI_REQUEST_TIMEOUT_PERIOD))) {
857 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
858 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
859 0x1009, 0x1, TW_CL_SEVERITY_ERROR_STRING,
860 "Microcontroller not ready",
861 "error = %d", error);
862 return(error);
863 }
864 /* Drain the response queue. */
865 if ((error = tw_cli_drain_response_queue(ctlr))) {
866 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
867 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
868 0x100A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
869 "Can't drain response queue",
870 "error = %d", error);
871 return(error);
872 }
873 /* Establish a logical connection with the controller. */
874 if ((error = tw_cli_init_connection(ctlr,
875 (TW_UINT16)(ctlr->max_simult_reqs),
876 TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
877 (TW_UINT16)(ctlr->arch_id),
878 TWA_CURRENT_FW_BRANCH(ctlr->arch_id),
879 TWA_CURRENT_FW_BUILD(ctlr->arch_id),
880 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
881 &fw_on_ctlr_branch, &fw_on_ctlr_build,
882 &init_connect_result))) {
883 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
884 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
885 0x100B, 0x2, TW_CL_SEVERITY_WARNING_STRING,
886 "Can't initialize connection in current mode",
887 "error = %d", error);
888 return(error);
889 }
890
891 #ifdef TW_OSL_FLASH_FIRMWARE
892
893 if ((ctlr->flags & TW_CL_FLASH_FIRMWARE) &&
894 (init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
895 (init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
896 /*
897 * The bundled firmware is safe to flash, and the firmware
898 * on the controller recommends a flash. So, flash!
899 */
900 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
901 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
902 0x100C, 0x3, TW_CL_SEVERITY_INFO_STRING,
903 "Flashing bundled firmware...",
904 " ");
905 if ((error = tw_cli_flash_firmware(ctlr))) {
906 fw_flash_failed = TW_CL_TRUE;
907 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
908 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
909 0x100D, 0x2, TW_CL_SEVERITY_WARNING_STRING,
910 "Unable to flash bundled firmware. "
911 "Attempting to work with fw on ctlr...",
912 " ");
913 } else {
914 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
915 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
916 0x100E, 0x3, TW_CL_SEVERITY_INFO_STRING,
917 "Successfully flashed bundled firmware",
918 " ");
919 fw_flashed = TW_CL_TRUE;
920 }
921 }
922
923 if (fw_flashed) {
924 /* The firmware was flashed. Have the new image loaded */
925 error = tw_cli_hard_reset(ctlr);
926 if (error)
927 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
928 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
929 0x100F, 0x1, TW_CL_SEVERITY_ERROR_STRING,
930 "Could not reset controller after flash!",
931 " ");
932 else /* Go through initialization again. */
933 error = tw_cli_start_ctlr(ctlr);
934 /*
935 * If hard reset of controller failed, we need to return.
936 * Otherwise, the above recursive call to tw_cli_start_ctlr
937 * will have completed the rest of the initialization (starting
938 * from tw_cli_drain_aen_queue below). Don't do it again.
939 * Just return.
940 */
941 return(error);
942 } else
943 #endif /* TW_OSL_FLASH_FIRMWARE */
944 {
945 /*
946 * Either we are not bundled with a firmware image, or
947 * the bundled firmware is not safe to flash,
948 * or flash failed for some reason. See if we can at
949 * least work with the firmware on the controller in the
950 * current mode.
951 */
952 if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
953 /* Yes, we can. Make note of the operating mode. */
954 if (init_connect_result & TWA_CTLR_FW_SAME_OR_NEWER) {
955 ctlr->working_srl = TWA_CURRENT_FW_SRL;
956 ctlr->working_branch =
957 TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
958 ctlr->working_build =
959 TWA_CURRENT_FW_BUILD(ctlr->arch_id);
960 } else {
961 ctlr->working_srl = fw_on_ctlr_srl;
962 ctlr->working_branch = fw_on_ctlr_branch;
963 ctlr->working_build = fw_on_ctlr_build;
964 }
965 } else {
966 /*
967 * No, we can't. See if we can at least work with
968 * it in the base mode. We should never come here
969 * if firmware has just been flashed.
970 */
971 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
972 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
973 0x1010, 0x2, TW_CL_SEVERITY_WARNING_STRING,
974 "Driver/Firmware mismatch. "
975 "Negotiating for base level...",
976 " ");
977 if ((error = tw_cli_init_connection(ctlr,
978 (TW_UINT16)(ctlr->max_simult_reqs),
979 TWA_EXTENDED_INIT_CONNECT,
980 TWA_BASE_FW_SRL,
981 (TW_UINT16)(ctlr->arch_id),
982 TWA_BASE_FW_BRANCH, TWA_BASE_FW_BUILD,
983 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
984 &fw_on_ctlr_branch, &fw_on_ctlr_build,
985 &init_connect_result))) {
986 tw_cl_create_event(ctlr->ctlr_handle,
987 TW_CL_FALSE,
988 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
989 0x1011, 0x1,
990 TW_CL_SEVERITY_ERROR_STRING,
991 "Can't initialize connection in "
992 "base mode",
993 " ");
994 return(error);
995 }
996 if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
997 /*
998 * The firmware on the controller is not even
999 * compatible with our base mode. We cannot
1000 * work with it. Bail...
1001 */
1002 #ifdef TW_OSL_FLASH_FIRMWARE
1003 if (fw_flash_failed)
1004 tw_cl_create_event(ctlr->ctlr_handle,
1005 TW_CL_FALSE,
1006 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1007 0x1012, 0x1,
1008 TW_CL_SEVERITY_ERROR_STRING,
1009 "Incompatible firmware on controller"
1010 "...and could not flash bundled "
1011 "firmware",
1012 " ");
1013 else
1014 tw_cl_create_event(ctlr->ctlr_handle,
1015 TW_CL_FALSE,
1016 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1017 0x1013, 0x1,
1018 TW_CL_SEVERITY_ERROR_STRING,
1019 "Incompatible firmware on controller"
1020 "...and bundled firmware not safe to "
1021 "flash",
1022 " ");
1023 #endif /* TW_OSL_FLASH_FIRMWARE */
1024 return(1);
1025 }
1026 /*
1027 * We can work with this firmware, but only in
1028 * base mode.
1029 */
1030 ctlr->working_srl = TWA_BASE_FW_SRL;
1031 ctlr->working_branch = TWA_BASE_FW_BRANCH;
1032 ctlr->working_build = TWA_BASE_FW_BUILD;
1033 ctlr->operating_mode = TWA_BASE_MODE;
1034 }
1035 ctlr->fw_on_ctlr_srl = fw_on_ctlr_srl;
1036 ctlr->fw_on_ctlr_branch = fw_on_ctlr_branch;
1037 ctlr->fw_on_ctlr_build = fw_on_ctlr_build;
1038 }
1039
1040 /* Drain the AEN queue */
1041 if ((error = tw_cli_drain_aen_queue(ctlr)))
1042 /*
1043 * We will just print that we couldn't drain the AEN queue.
1044 * There's no need to bail out.
1045 */
1046 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
1047 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1048 0x1014, 0x2, TW_CL_SEVERITY_WARNING_STRING,
1049 "Can't drain AEN queue",
1050 "error = %d", error);
1051
1052 /* Enable interrupts. */
1053 tw_cli_enable_interrupts(ctlr);
1054
1055 return(TW_OSL_ESUCCESS);
1056 }
1057
1058
1059 /*
1060 * Function name: tw_cl_shutdown_ctlr
1061 * Description: Closes logical connection with the controller.
1062 *
1063 * Input: ctlr -- ptr to per ctlr structure
1064 * flags -- more info passed by the OS Layer
1065 * Output: None
1066 * Return value: 0 -- success
1067 * non-zero-- failure
1068 */
1069 TW_INT32
1070 tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
1071 {
1072 struct tw_cli_ctlr_context *ctlr =
1073 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
1074 TW_INT32 error;
1075
1076 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");
1077 /*
1078 * Mark the controller as inactive, disable any further interrupts,
1079 * and notify the controller that we are going down.
1080 */
1081 ctlr->state &= ~TW_CLI_CTLR_STATE_ACTIVE;
1082
1083 tw_cli_disable_interrupts(ctlr);
1084
1085 /* Let the controller know that we are going down. */
1086 if ((error = tw_cli_init_connection(ctlr, TWA_SHUTDOWN_MESSAGE_CREDITS,
1087 0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
1088 TW_CL_NULL, TW_CL_NULL)))
1089 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1090 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1091 0x1015, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1092 "Can't close connection with controller",
1093 "error = %d", error);
1094
1095 if (flags & TW_CL_STOP_CTLR_ONLY)
1096 goto ret;
1097
1098 /* Destroy all locks used by CL. */
1099 tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
1100 tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);
1101 if (!(ctlr->flags & TW_CL_64BIT_ADDRESSES))
1102 tw_osl_destroy_lock(ctlr_handle, ctlr->intr_lock);
1103
1104 ret:
1105 return(error);
1106 }
1107
1108
1109
1110 /*
1111 * Function name: tw_cli_init_connection
1112 * Description: Sends init_connection cmd to firmware
1113 *
1114 * Input: ctlr -- ptr to per ctlr structure
1115 * message_credits -- max # of requests that we might send
1116 * down simultaneously. This will be
1117 * typically set to 256 at init-time or
1118 * after a reset, and to 1 at shutdown-time
1119 * set_features -- indicates if we intend to use 64-bit
1120 * sg, also indicates if we want to do a
1121 * basic or an extended init_connection;
1122 *
1123 * Note: The following input/output parameters are valid, only in case of an
1124 * extended init_connection:
1125 *
1126 * current_fw_srl -- srl of fw we are bundled
1127 * with, if any; 0 otherwise
1128 * current_fw_arch_id -- arch_id of fw we are bundled
1129 * with, if any; 0 otherwise
1130 * current_fw_branch -- branch # of fw we are bundled
1131 * with, if any; 0 otherwise
1132 * current_fw_build -- build # of fw we are bundled
1133 * with, if any; 0 otherwise
1134 * Output: fw_on_ctlr_srl -- srl of fw on ctlr
1135 * fw_on_ctlr_arch_id -- arch_id of fw on ctlr
1136 * fw_on_ctlr_branch -- branch # of fw on ctlr
1137 * fw_on_ctlr_build -- build # of fw on ctlr
1138 * init_connect_result -- result bitmap of fw response
1139 * Return value: 0 -- success
1140 * non-zero-- failure
1141 */
TW_INT32
tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
	TW_UINT16 message_credits, TW_UINT32 set_features,
	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
	TW_UINT32 *init_connect_result)
{
	struct tw_cli_req_context *req;
	struct tw_cl_command_init_connect *init_connect;
	/* Default to EBUSY so a failed request allocation reports busy. */
	TW_INT32 error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		goto out;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Point the request at the controller-wide pre-allocated command
	 * packet buffer, and clear the header plus the fixed part of the
	 * command (everything before the scatter/gather list).
	 */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	/* Internal request: completed by the CL, not handed back to the OSL. */
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build the cmd pkt. */
	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	init_connect->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
	init_connect->request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	/* Multi-byte fields are byte-swapped into firmware (wire) order. */
	init_connect->message_credits = TW_CL_SWAP16(message_credits);
	init_connect->features = TW_CL_SWAP32(set_features);
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		/*
		 * Ask the firmware for 64-bit SG addresses.
		 * NOTE(review): OR'ed in after the TW_CL_SWAP32 above --
		 * assumes TWA_64BIT_SG_ADDRESSES is already in wire byte
		 * order on big-endian hosts; confirm against tw_cl_fwif.h.
		 */
		init_connect->features |= TWA_64BIT_SG_ADDRESSES;
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Fill in the extra fields needed for an extended
		 * init_connect.
		 */
		init_connect->size = 6;
		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
	} else
		/* Basic init_connect: smaller command, no fw version info. */
		init_connect->size = 3;

	/* Submit the command, and wait for it to complete. */
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error == TW_OSL_ETIMEDOUT)
		/* Clean-up done by tw_cli_submit_and_poll_request. */
		return(error);
	if (error)
		goto out;
	/* A non-zero firmware status is reported as a controller event. */
	if ((error = init_connect->status)) {
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		goto out;
	}
	/*
	 * For an extended init_connect, copy the firmware's version info
	 * and result bitmap (swapped back to host order) to the caller.
	 * The output pointers are only dereferenced in this case, matching
	 * the NULLs passed for a basic init_connect.
	 */
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
		*init_connect_result = TW_CL_SWAP32(init_connect->result);
	}
	/* Success: return the request to the free queue. */
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);

out:
	/* Failure path: log the error and release the request, if we got one. */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"init_connection failed",
		"error = %d", error);
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
1237
1238
Cache object: fe96a5257f8604d31859480d5c6ab8de
|