1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/6.0/sys/dev/twa/tw_cl_init.c 144966 2005-04-12 22:07:11Z vkashyap $
28 */
29
30 /*
31 * AMCC'S 3ware driver for 9000 series storage controllers.
32 *
33 * Author: Vinod Kashyap
34 */
35
36
37 /*
38 * Common Layer initialization functions.
39 */
40
41
42 #include "tw_osl_share.h"
43 #include "tw_cl_share.h"
44 #include "tw_cl_fwif.h"
45 #include "tw_cl_ioctl.h"
46 #include "tw_cl.h"
47 #include "tw_cl_externs.h"
48 #include "tw_osl_ioctl.h"
49
50
51 /*
52 * Function name: tw_cl_ctlr_supported
53 * Description: Determines if a controller is supported.
54 *
55 * Input: vendor_id -- vendor id of the controller
56 * device_id -- device id of the controller
57 * Output: None
58 * Return value: TW_CL_TRUE-- controller supported
59 * TW_CL_FALSE-- controller not supported
60 */
61 TW_INT32
62 tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id)
63 {
64 if ((vendor_id == TW_CL_VENDOR_ID) && (device_id == TW_CL_DEVICE_ID_9K))
65 return(TW_CL_TRUE);
66 return(TW_CL_FALSE);
67 }
68
69
70
71 /*
72 * Function name: tw_cl_get_mem_requirements
73 * Description: Provides info about Common Layer requirements for a
74 * controller, given the controller type (in 'flags').
75 * Input: ctlr_handle -- controller handle
76 * flags -- more info passed by the OS Layer
77 * max_simult_reqs -- maximum # of simultaneous
78 * requests that the OS Layer expects
79 * the Common Layer to support
 *			max_aens -- maximum # of AEN's needed to be supported
81 * Output: alignment -- alignment needed for all DMA'able
82 * buffers
83 * sg_size_factor -- every SG element should have a size
84 * that's a multiple of this number
85 * non_dma_mem_size -- # of bytes of memory needed for
86 * non-DMA purposes
87 * dma_mem_size -- # of bytes of DMA'able memory needed
88 * flash_dma_mem_size -- # of bytes of DMA'able memory
89 * needed for firmware flash, if applicable
90 * per_req_dma_mem_size -- # of bytes of DMA'able memory
91 * needed per request, if applicable
92 * per_req_non_dma_mem_size -- # of bytes of memory needed
93 * per request for non-DMA purposes,
94 * if applicable
 * Return value:	0	-- success
 *			non-zero-- failure
98 */
TW_INT32
tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT32 flags, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_UINT32 *alignment, TW_UINT32 *sg_size_factor,
	TW_UINT32 *non_dma_mem_size, TW_UINT32 *dma_mem_size
#ifdef TW_OSL_FLASH_FIRMWARE
	, TW_UINT32 *flash_dma_mem_size
#endif /* TW_OSL_FLASH_FIRMWARE */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	, TW_UINT32 *per_req_dma_mem_size
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	, TW_UINT32 *per_req_non_dma_mem_size
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
	)
{
	/* Refuse request counts beyond what the CL can track. */
	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	/* Fixed hardware constraints for all DMA'able buffers. */
	*alignment = TWA_ALIGNMENT;
	*sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR;

	/*
	 * Total non-DMA memory needed is the sum total of memory needed for
	 * the controller context, request packets (including the 1 needed for
	 * CL internal requests), and event packets.
	 */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Per-request mode: the up-front allocation covers only the single
	 * CL-internal request; the OSL allocates the rest per request.
	 */
	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context)) +
		(sizeof(struct tw_cl_event_packet) * max_aens);
	*per_req_non_dma_mem_size = sizeof(struct tw_cli_req_context);

#else /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	/* One-shot mode: max_simult_reqs + 1 (internal) request contexts. */
	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens);

#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	/*
	 * Total DMA'able memory needed is the sum total of memory needed for
	 * all command packets (including the 1 needed for CL internal
	 * requests), and memory needed to hold the payload for internal
	 * requests.
	 */
#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	*dma_mem_size = sizeof(struct tw_cl_command_packet) +
		TW_CLI_SECTOR_SIZE;
	*per_req_dma_mem_size = sizeof(struct tw_cl_command_packet);

#else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	*dma_mem_size = (sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) + (TW_CLI_SECTOR_SIZE);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */


#ifdef TW_OSL_FLASH_FIRMWARE

	/*
	 * Memory needed to hold the firmware image while flashing: one
	 * image chunk, rounded up to the SG element size factor.
	 */
	*flash_dma_mem_size =
		((tw_cli_fw_img_size / TW_CLI_NUM_FW_IMAGE_CHUNKS) +
		(TWA_SG_ELEMENT_SIZE_FACTOR - 1)) &
		~(TWA_SG_ELEMENT_SIZE_FACTOR - 1);

#endif /* TW_OSL_FLASH_FIRMWARE */

	return(0);
}
181
182
183
184 /*
185 * Function name: tw_cl_init_ctlr
186 * Description: Initializes driver data structures for the controller.
187 *
188 * Input: ctlr_handle -- controller handle
189 * flags -- more info passed by the OS Layer
190 * max_simult_reqs -- maximum # of simultaneous requests
191 * that the OS Layer expects the Common
192 * Layer to support
 *			max_aens -- maximum # of AEN's needed to be supported
194 * non_dma_mem -- ptr to allocated non-DMA memory
195 * dma_mem -- ptr to allocated DMA'able memory
196 * dma_mem_phys -- physical address of dma_mem
197 * flash_dma_mem -- ptr to allocated DMA'able memory
198 * needed for firmware flash, if applicable
199 * flash_dma_mem_phys -- physical address of flash_dma_mem
200 * Output: None
201 * Return value: 0 -- success
202 * non-zero-- failure
203 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 max_simult_reqs, TW_INT32 max_aens, TW_VOID *non_dma_mem,
	TW_VOID *dma_mem, TW_UINT64 dma_mem_phys
#ifdef TW_OSL_FLASH_FIRMWARE
	, TW_VOID *flash_dma_mem,
	TW_UINT64 flash_dma_mem_phys
#endif /* TW_OSL_FLASH_FIRMWARE */
	)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * (Re)start-only path (e.g. after a reset): the controller context
	 * already exists -- skip memory carving and lock setup entirely.
	 */
	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	/* Refuse request counts beyond what the CL can track. */
	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	/*
	 * The OSL must have handed us both memory regions (and the flash
	 * region, if it asked for a firmware flash).
	 */
	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)
#ifdef TW_OSL_FLASH_FIRMWARE
		|| ((flags & TW_CL_FLASH_FIRMWARE) ?
		(flash_dma_mem == TW_CL_NULL) : TW_CL_FALSE)
#endif /* TW_OSL_FLASH_FIRMWARE */
		) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	/*
	 * Zero both regions; the sizes mirror the layout reported by
	 * tw_cl_get_mem_requirements, so keep the two in sync.
	 */
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		sizeof(struct tw_cli_req_context) +
		(sizeof(struct tw_cl_event_packet) * max_aens));
#else /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens));
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
	tw_osl_memzero(dma_mem,
		sizeof(struct tw_cl_command_packet) +
		TW_CLI_SECTOR_SIZE);
#else /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) +
		TW_CLI_SECTOR_SIZE);
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */


	/*
	 * Carve the non-DMA region: controller context first, then the
	 * request context array, then the AEN queue (see end of function).
	 */
	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	/* Cross-link the OSL handle and the CL context. */
	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	/* +1 accounts for the CL-internal request. */
	ctlr->max_simult_reqs = max_simult_reqs + 1;
	ctlr->max_aens_supported = max_aens;
	ctlr->flags = flags;

#ifdef TW_OSL_FLASH_FIRMWARE
	ctlr->flash_dma_mem = flash_dma_mem;
	ctlr->flash_dma_mem_phys = flash_dma_mem_phys;
#endif /* TW_OSL_FLASH_FIRMWARE */

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
	/*
	 * If 64 bit cmd pkt addresses are used, we will need to serialize
	 * writes to the hardware (across registers), since existing hardware
	 * will get confused if, for example, we wrote the low 32 bits of the
	 * cmd pkt address, followed by a response interrupt mask to the
	 * control register, followed by the high 32 bits of the cmd pkt
	 * address.  It will then interpret the value written to the control
	 * register as the low cmd pkt address.  So, for this case, we will
	 * only use one lock (io_lock) by making io_lock & intr_lock one and
	 * the same.
	 */
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		ctlr->intr_lock = ctlr->io_lock;
	else {
		ctlr->intr_lock = &(ctlr->intr_lock_handle);
		tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
			ctlr->intr_lock);
	}

	/*
	 * Initialize CL internal request context packets.  In per-request
	 * mode only the single internal request lives here; otherwise all
	 * max_simult_reqs + 1 of them do.
	 */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		(
#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		1));

	/* Command packet array lives at the start of the DMA region. */
	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	/*
	 * Payload buffer for internal requests sits right after the command
	 * packet(s); keep the virtual and physical offsets in lock-step.
	 */
	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		(
#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		1));
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		(
#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
		1));

	/* Set up each request context and park it on the free queue. */
	for (i = 0;
		i < (
#ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		max_simult_reqs +
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		1); i++) {
		req = &(ctlr->req_ctxt_buf[i]);

#ifndef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

		/* Statically bind each request to its command packet slot. */
		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

		req->request_id = i;
		req->ctlr = ctlr;

#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		/* Only the internal request is carved here in this mode. */
		req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}


#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Seed the free request-id ring with the ids not consumed by the
	 * loop above (i is 1 here after the single internal request).
	 * NOTE(review): head/tail start at i - 1 and ids fill [i - 1 ..);
	 * presumably free_req_ids indexing is offset by 1 relative to
	 * request ids -- confirm against the dequeue logic.
	 */
	ctlr->free_req_head = i - 1;
	ctlr->free_req_tail = i - 1;

	for (; i < (max_simult_reqs + 1); i++)
		ctlr->free_req_ids[i - 1] = i;

	ctlr->num_free_req_ids = max_simult_reqs;

#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */


	/* Initialize the AEN queue (remainder of the non-DMA region). */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;


start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller as active. */
	ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
	return(error);
}
432
433
434
435 #ifdef TW_OSL_FLASH_FIRMWARE
436 /*
437 * Function name: tw_cli_flash_firmware
438 * Description: Flashes bundled firmware image onto controller.
439 *
440 * Input: ctlr -- ptr to per ctlr structure
441 * Output: None
442 * Return value: 0 -- success
443 * non-zero-- failure
444 */
TW_INT32
tw_cli_flash_firmware(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_header		*cmd_hdr;
	struct tw_cl_command_download_firmware	*cmd;
	TW_UINT32	fw_img_chunk_size;
	TW_UINT32	num_chunks;
	TW_UINT32	this_chunk_size = 0;
	TW_INT32	remaining_img_size = 0;
	TW_INT32	hard_reset_needed = TW_CL_FALSE;
	TW_INT32	error = TW_OSL_EGENFAILURE;
	TW_UINT32	i;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL) {
		/* No free request packets available. Can't proceed. */
		error = TW_OSL_EBUSY;
		goto out;
	}

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * In per-request DMA mode, internal requests borrow the single
	 * pre-allocated command packet slot.
	 */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/*
	 * Determine amount of memory needed to hold a chunk of the
	 * firmware image; rounded up to the SG element size factor,
	 * mirroring the flash_dma_mem_size calculation.
	 */
	fw_img_chunk_size = ((tw_cli_fw_img_size / TW_CLI_NUM_FW_IMAGE_CHUNKS) +
		(TWA_SG_ELEMENT_SIZE_FACTOR - 1)) &
		~(TWA_SG_ELEMENT_SIZE_FACTOR - 1);

	/* Calculate the actual number of chunks needed. */
	num_chunks = (tw_cli_fw_img_size / fw_img_chunk_size) +
		((tw_cli_fw_img_size % fw_img_chunk_size) ? 1 : 0);

	/* The flash staging buffer was handed to us at init time. */
	req->data = ctlr->flash_dma_mem;
	req->data_phys = ctlr->flash_dma_mem_phys;

	remaining_img_size = tw_cli_fw_img_size;

	cmd_hdr = &(req->cmd_pkt->cmd_hdr);
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k.download_fw);

	/* Download the image one chunk at a time. */
	for (i = 0; i < num_chunks; i++) {
		/* Build a cmd pkt for downloading firmware. */
		tw_osl_memzero(req->cmd_pkt,
			sizeof(struct tw_cl_command_packet));

		cmd_hdr->header_desc.size_header = 128;

		/* sgl_offset (offset in dwords, to sg list) is 2. */
		cmd->sgl_off__opcode =
			BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_DOWNLOAD_FIRMWARE);
		cmd->request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
		cmd->unit = 0;
		cmd->status = 0;
		cmd->flags = 0;
		cmd->param = TW_CL_SWAP16(8);	/* prom image */

		if (i != (num_chunks - 1))
			this_chunk_size = fw_img_chunk_size;
		else	 /* last chunk */
			this_chunk_size = remaining_img_size;

		remaining_img_size -= this_chunk_size;

		/* Stage this chunk into the DMA'able flash buffer. */
		tw_osl_memcpy(req->data, tw_cli_fw_img + (i * fw_img_chunk_size),
			this_chunk_size);

		/*
		 * The next line will effect only the last chunk: earlier
		 * chunks are already SG-factor aligned, the last is rounded
		 * up here.
		 */
		req->length = (this_chunk_size +
			(TWA_SG_ELEMENT_SIZE_FACTOR - 1)) &
			~(TWA_SG_ELEMENT_SIZE_FACTOR - 1);

		/* Single SG element; descriptor width depends on ctlr mode. */
		if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
			((struct tw_cl_sg_desc64 *)(cmd->sgl))[0].address =
				TW_CL_SWAP64(req->data_phys);
			((struct tw_cl_sg_desc64 *)(cmd->sgl))[0].length =
				TW_CL_SWAP32(req->length);
			cmd->size = 2 + 3;
		} else {
			((struct tw_cl_sg_desc32 *)(cmd->sgl))[0].address =
				TW_CL_SWAP32(req->data_phys);
			((struct tw_cl_sg_desc32 *)(cmd->sgl))[0].length =
				TW_CL_SWAP32(req->length);
			cmd->size = 2 + 2;
		}

		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error) {
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1005, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Firmware flash request could not be posted",
				"error = %d\n", error);
			if (error == TW_OSL_ETIMEDOUT)
				/* clean-up done by tw_cli_submit_and_poll_request */
				return(error);
			break;
		}
		error = cmd->status;

		/*
		 * Every chunk except the last is expected to complete with
		 * TWA_ERROR_MORE_DATA; anything else (or any error on the
		 * last chunk) is a failure.
		 */
		if (((i == (num_chunks - 1)) && (error)) ||
			((i != (num_chunks - 1)) &&
			((error = cmd_hdr->status_block.error) !=
			TWA_ERROR_MORE_DATA))) {
			/*
			 * It's either that download of the last chunk
			 * failed, or the download of one of the earlier
			 * chunks failed with an error other than
			 * TWA_ERROR_MORE_DATA.  Report the error.
			 */
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1006, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Firmware flash failed",
				"cmd = 0x%x, chunk # %d, cmd status = %d",
				GET_OPCODE(cmd->sgl_off__opcode),
				i, cmd->status);
			/*
			 * Make a note to hard reset the controller,
			 * so that it doesn't wait for the remaining
			 * chunks.  Don't call the hard reset function
			 * right here, since we have committed to having
			 * only 1 active internal request at a time, and
			 * this request has not yet been freed.
			 */
			hard_reset_needed = TW_CL_TRUE;
			break;
		}
	} /* for */

out:
	/* Return the request before (possibly) resetting -- see note above. */
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

	if (hard_reset_needed)
		tw_cli_hard_reset(ctlr);

	return(error);
}
606
607
608
609 /*
610 * Function name: tw_cli_hard_reset
611 * Description: Hard resets the controller.
612 *
613 * Input: ctlr -- ptr to per ctlr structure
614 * Output: None
615 * Return value: 0 -- success
616 * non-zero-- failure
617 */
TW_INT32
tw_cli_hard_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_reset_firmware	*cmd;
	TW_INT32				error;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Grab a free internal request; bail if none are available. */
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/* Internal requests borrow the single pre-allocated cmd pkt slot. */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build a cmd pkt for sending down the hard reset command. */
	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd = &(req->cmd_pkt->command.cmd_pkt_7k.reset_fw);
	cmd->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_HARD_RESET_FIRMWARE);
	cmd->size = 2;
	cmd->request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->unit = 0;
	cmd->status = 0;
	cmd->flags = 0;
	cmd->param = 0;	/* don't reload FPGA logic */

	/* Hard reset carries no payload. */
	req->data = TW_CL_NULL;
	req->length = 0;

	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1007, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Hard reset request could not be posted",
			"error = %d", error);
		if (error == TW_OSL_ETIMEDOUT)
			/* clean-up done by tw_cli_submit_and_poll_request */
			return(error);
		goto out;
	}
	/* The command completed; check the firmware-reported status. */
	if ((error = cmd->status)) {
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1008, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Hard reset request failed",
			"error = %d", error);
	}

out:
	/* Return the internal request to the free queue. */
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
691
692 #endif /* TW_OSL_FLASH_FIRMWARE */
693
694
695
696 /*
697 * Function name: tw_cli_start_ctlr
698 * Description: Establishes a logical connection with the controller.
699 * If bundled with firmware, determines whether or not
700 * to flash firmware, based on arch_id, fw SRL (Spec.
701 * Revision Level), branch & build #'s. Also determines
702 * whether or not the driver is compatible with the
703 * firmware on the controller, before proceeding to work
704 * with it.
705 *
706 * Input: ctlr -- ptr to per ctlr structure
707 * Output: None
708 * Return value: 0 -- success
709 * non-zero-- failure
710 */
TW_INT32
tw_cli_start_ctlr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT16	fw_on_ctlr_srl = 0;
	TW_UINT16	fw_on_ctlr_arch_id = 0;
	TW_UINT16	fw_on_ctlr_branch = 0;
	TW_UINT16	fw_on_ctlr_build = 0;
	TW_UINT32	init_connect_result = 0;
	TW_INT32	error = TW_OSL_ESUCCESS;
#ifdef TW_OSL_FLASH_FIRMWARE
	TW_INT8		fw_flashed = TW_CL_FALSE;
	TW_INT8		fw_flash_failed = TW_CL_FALSE;
#endif /* TW_OSL_FLASH_FIRMWARE */

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Wait for the controller to become ready. */
	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY,
			TW_CLI_REQUEST_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1009, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Microcontroller not ready",
			"error = %d", error);
		return(error);
	}
	/* Drain the response queue (discard any stale completions). */
	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue",
			"error = %d", error);
		return(error);
	}
	/*
	 * Establish a logical connection with the controller, advertising
	 * the driver's SRL/branch/build and learning the firmware's.
	 */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
			TWA_9000_ARCH_ID, TWA_CURRENT_FW_BRANCH,
			TWA_CURRENT_FW_BUILD, &fw_on_ctlr_srl,
			&fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			&fw_on_ctlr_build, &init_connect_result))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100B, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't initialize connection in current mode",
			"error = %d", error);
		return(error);
	}

#ifdef TW_OSL_FLASH_FIRMWARE

	/* Flash only if the OSL asked for it AND the fw handshake allows it. */
	if ((ctlr->flags & TW_CL_FLASH_FIRMWARE) &&
		(init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
		(init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
		/*
		 * The bundled firmware is safe to flash, and the firmware
		 * on the controller recommends a flash.  So, flash!
		 */
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100C, 0x3, TW_CL_SEVERITY_INFO_STRING,
			"Flashing bundled firmware...",
			" ");
		if ((error = tw_cli_flash_firmware(ctlr))) {
			/* Flash failed; fall back to the firmware on ctlr. */
			fw_flash_failed = TW_CL_TRUE;
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x100D, 0x2, TW_CL_SEVERITY_WARNING_STRING,
				"Unable to flash bundled firmware. "
				"Attempting to work with fw on ctlr...",
				" ");
		} else {
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x100E, 0x3, TW_CL_SEVERITY_INFO_STRING,
				"Successfully flashed bundled firmware",
				" ");
			fw_flashed = TW_CL_TRUE;
		}
	}

	if (fw_flashed) {
		/* The firmware was flashed.  Have the new image loaded */
		error = tw_cli_hard_reset(ctlr);
		if (error)
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x100F, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Could not reset controller after flash!",
				" ");
		else	/* Go through initialization again. */
			error = tw_cli_start_ctlr(ctlr);
		/*
		 * If hard reset of controller failed, we need to return.
		 * Otherwise, the above recursive call to tw_cli_start_ctlr
		 * will have completed the rest of the initialization (starting
		 * from tw_cli_drain_aen_queue below).  Don't do it again.
		 * Just return.
		 */
		return(error);
	} else
#endif /* TW_OSL_FLASH_FIRMWARE */
	{
		/*
		 * Either we are not bundled with a firmware image, or
		 * the bundled firmware is not safe to flash,
		 * or flash failed for some reason.  See if we can at
		 * least work with the firmware on the controller in the
		 * current mode.
		 */
		if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
			/* Yes, we can.  Make note of the operating mode. */
			if (init_connect_result & TWA_CTLR_FW_SAME_OR_NEWER) {
				/* Driver's own fw level is the working set. */
				ctlr->working_srl = TWA_CURRENT_FW_SRL;
				ctlr->working_branch = TWA_CURRENT_FW_BRANCH;
				ctlr->working_build = TWA_CURRENT_FW_BUILD;
			} else {
				/* Controller's fw level is the working set. */
				ctlr->working_srl = fw_on_ctlr_srl;
				ctlr->working_branch = fw_on_ctlr_branch;
				ctlr->working_build = fw_on_ctlr_build;
			}
		} else {
			/*
			 * No, we can't.  See if we can at least work with
			 * it in the base mode.  We should never come here
			 * if firmware has just been flashed.
			 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1010, 0x2, TW_CL_SEVERITY_WARNING_STRING,
				"Driver/Firmware mismatch. "
				"Negotiating for base level...",
				" ");
			if ((error = tw_cli_init_connection(ctlr,
					(TW_UINT16)(ctlr->max_simult_reqs),
					TWA_EXTENDED_INIT_CONNECT,
					TWA_BASE_FW_SRL, TWA_9000_ARCH_ID,
					TWA_BASE_FW_BRANCH, TWA_BASE_FW_BUILD,
					&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
					&fw_on_ctlr_branch, &fw_on_ctlr_build,
					&init_connect_result))) {
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1011, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Can't initialize connection in "
					"base mode",
					" ");
				return(error);
			}
			if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
				/*
				 * The firmware on the controller is not even
				 * compatible with our base mode.  We cannot
				 * work with it.  Bail...
				 */
#ifdef TW_OSL_FLASH_FIRMWARE
				if (fw_flash_failed)
					tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1012, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Incompatible firmware on controller"
					"...and could not flash bundled "
					"firmware",
					" ");
				else
					tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1013, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Incompatible firmware on controller"
					"...and bundled firmware not safe to "
					"flash",
					" ");
#endif /* TW_OSL_FLASH_FIRMWARE */
				return(1);
			}
			/*
			 * We can work with this firmware, but only in
			 * base mode.
			 */
			ctlr->working_srl = TWA_BASE_FW_SRL;
			ctlr->working_branch = TWA_BASE_FW_BRANCH;
			ctlr->working_build = TWA_BASE_FW_BUILD;
			ctlr->operating_mode = TWA_BASE_MODE;
		}
	}

	/* Drain the AEN queue */
	if ((error = tw_cli_drain_aen_queue(ctlr)))
		/*
		 * We will just print that we couldn't drain the AEN queue.
		 * There's no need to bail out.
		 */
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1014, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't drain AEN queue",
			"error = %d", error);

	/* Enable interrupts -- initialization succeeded. */
	tw_cli_enable_interrupts(ctlr);

	return(TW_OSL_ESUCCESS);
}
923
924
925 /*
926 * Function name: tw_cl_shutdown_ctlr
927 * Description: Closes logical connection with the controller.
928 *
929 * Input: ctlr -- ptr to per ctlr structure
930 * flags -- more info passed by the OS Layer
931 * Output: None
932 * Return value: 0 -- success
933 * non-zero-- failure
934 */
935 TW_INT32
936 tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
937 {
938 struct tw_cli_ctlr_context *ctlr =
939 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
940 TW_INT32 error;
941
942 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");
943 /*
944 * Mark the controller as inactive, disable any further interrupts,
945 * and notify the controller that we are going down.
946 */
947 ctlr->state &= ~TW_CLI_CTLR_STATE_ACTIVE;
948
949 tw_cli_disable_interrupts(ctlr);
950
951 /* Let the controller know that we are going down. */
952 if ((error = tw_cli_init_connection(ctlr, TWA_SHUTDOWN_MESSAGE_CREDITS,
953 0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
954 TW_CL_NULL, TW_CL_NULL)))
955 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
956 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
957 0x1015, 0x1, TW_CL_SEVERITY_ERROR_STRING,
958 "Can't close connection with controller",
959 "error = %d", error);
960
961 if (flags & TW_CL_STOP_CTLR_ONLY)
962 goto ret;
963
964 /* Destroy all locks used by CL. */
965 tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
966 tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);
967 if (!(ctlr->flags & TW_CL_64BIT_ADDRESSES))
968 tw_osl_destroy_lock(ctlr_handle, ctlr->intr_lock);
969
970 ret:
971 return(error);
972 }
973
974
975
976 /*
977 * Function name: tw_cli_init_connection
978 * Description: Sends init_connection cmd to firmware
979 *
980 * Input: ctlr -- ptr to per ctlr structure
981 * message_credits -- max # of requests that we might send
982 * down simultaneously. This will be
983 * typically set to 256 at init-time or
984 * after a reset, and to 1 at shutdown-time
985 * set_features -- indicates if we intend to use 64-bit
986 * sg, also indicates if we want to do a
987 * basic or an extended init_connection;
988 *
989 * Note: The following input/output parameters are valid, only in case of an
990 * extended init_connection:
991 *
992 * current_fw_srl -- srl of fw we are bundled
993 * with, if any; 0 otherwise
994 * current_fw_arch_id -- arch_id of fw we are bundled
995 * with, if any; 0 otherwise
996 * current_fw_branch -- branch # of fw we are bundled
997 * with, if any; 0 otherwise
998 * current_fw_build -- build # of fw we are bundled
999 * with, if any; 0 otherwise
1000 * Output: fw_on_ctlr_srl -- srl of fw on ctlr
1001 * fw_on_ctlr_arch_id -- arch_id of fw on ctlr
1002 * fw_on_ctlr_branch -- branch # of fw on ctlr
1003 * fw_on_ctlr_build -- build # of fw on ctlr
1004 * init_connect_result -- result bitmap of fw response
1005 * Return value: 0 -- success
1006 * non-zero-- failure
1007 */
TW_INT32
tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
	TW_UINT16 message_credits, TW_UINT32 set_features,
	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
	TW_UINT32 *init_connect_result)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_init_connect	*init_connect;
	/* Default to EBUSY so a failed request allocation reports sensibly. */
	TW_INT32				error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr
#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
		, TW_CL_NULL
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
		)) == TW_CL_NULL)
		goto out;

#ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST

	/*
	 * Borrow the controller's pre-allocated command packet buffer and
	 * clear the header plus the fixed part of the command (the 28 bytes
	 * that precede the scatter/gather list).
	 */
	req->cmd_pkt = ctlr->cmd_pkt_buf;
	req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
	tw_osl_memzero(req->cmd_pkt,
		sizeof(struct tw_cl_command_header) +
		28 /* max bytes before sglist */);

#endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */

	/* Mark as CL-internal so completion is not reported to the OSL. */
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build the cmd pkt. */
	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	init_connect->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
	init_connect->request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	init_connect->message_credits = TW_CL_SWAP16(message_credits);
	init_connect->features = TW_CL_SWAP32(set_features);
	/*
	 * NOTE(review): TWA_64BIT_SG_ADDRESSES is OR'ed in without
	 * TW_CL_SWAP32, unlike set_features just above — presumably the
	 * constant is defined in wire order, or this only runs little-endian;
	 * verify against tw_cl_fwif.h before changing.
	 */
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		init_connect->features |= TWA_64BIT_SG_ADDRESSES;
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Fill in the extra fields needed for an extended
		 * init_connect.
		 */
		init_connect->size = 6;
		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
	} else
		/* Basic init_connect uses the smaller, 3-unit command. */
		init_connect->size = 3;

	/* Submit the command, and wait for it to complete. */
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error == TW_OSL_ETIMEDOUT)
		/*
		 * Clean-up done by tw_cli_submit_and_poll_request.
		 * The request must NOT be requeued here — ownership stays
		 * with the timeout path, so return directly.
		 */
		return(error);
	if (error)
		goto out;
	/* Non-zero firmware status is the error code for this command. */
	if ((error = init_connect->status)) {
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		goto out;
	}
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Extended response: report the firmware version on the
		 * controller back to the caller (output params are only
		 * valid/used in this mode).
		 */
		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
		*init_connect_result = TW_CL_SWAP32(init_connect->result);
	}
	/* Success: return the request to the free pool. */
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);

out:
	/* Failure path: log the event and free the request if we got one. */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"init_connection failed",
		"error = %d", error);
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
1103
1104
/* Cache object: c63d52a5687a210ba72b5b8a5c49b0f2 */