/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/12.0/sys/dev/twa/tw_cl_init.c 326255 2017-11-27 14:52:40Z pfg $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * Common Layer initialization functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"


/*
 * Function name:	tw_cl_ctlr_supported
 * Description:		Determines if a controller is supported.
 *
 * Input:		vendor_id -- vendor id of the controller
 *			device_id -- device id of the controller
 * Output:		None
 * Return value:	TW_CL_TRUE -- controller supported
 *			TW_CL_FALSE -- controller not supported
 */
TW_INT32
tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id)
{
	if ((vendor_id == TW_CL_VENDOR_ID) &&
	    ((device_id == TW_CL_DEVICE_ID_9K) ||
	    (device_id == TW_CL_DEVICE_ID_9K_X) ||
	    (device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (device_id == TW_CL_DEVICE_ID_9K_SA)))
		return(TW_CL_TRUE);
	return(TW_CL_FALSE);
}
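
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical OS
 * Layer PCI probe routine could call tw_cl_ctlr_supported() to decide
 * whether to claim a device.  Only tw_cl_ctlr_supported() and the
 * TW_OSL_ error codes are taken from the real interface; the wrapper
 * below is invented for illustration.
 */
#if 0
static TW_INT32
example_osl_probe(TW_INT32 vendor_id, TW_INT32 device_id)
{
	/* Claim the device only if the Common Layer supports it. */
	if (tw_cl_ctlr_supported(vendor_id, device_id))
		return(TW_OSL_ESUCCESS);
	return(TW_OSL_ENOTTY);
}
#endif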



/*
 * Function name:	tw_cl_get_pci_bar_info
 * Description:		Returns PCI BAR info.
 *
 * Input:		device_id -- device id of the controller
 *			bar_type -- type of PCI BAR in question
 * Output:		bar_num -- PCI BAR number corresponding to bar_type
 *			bar0_offset -- byte offset from BAR 0 (0x10 in
 *					PCI config space)
 *			bar_size -- size, in bytes, of the BAR in question
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_get_pci_bar_info(TW_INT32 device_id, TW_INT32 bar_type,
	TW_INT32 *bar_num, TW_INT32 *bar0_offset, TW_INT32 *bar_size)
{
	TW_INT32	error = TW_OSL_ESUCCESS;

	switch(device_id) {
	case TW_CL_DEVICE_ID_9K:
		switch(bar_type) {
		case TW_CL_BAR_TYPE_IO:
			*bar_num = 0;
			*bar0_offset = 0;
			*bar_size = 4;
			break;

		case TW_CL_BAR_TYPE_MEM:
			*bar_num = 1;
			*bar0_offset = 0x4;
			*bar_size = 8;
			break;

		case TW_CL_BAR_TYPE_SBUF:
			*bar_num = 2;
			*bar0_offset = 0xC;
			*bar_size = 8;
			break;
		}
		break;

	case TW_CL_DEVICE_ID_9K_X:
	case TW_CL_DEVICE_ID_9K_E:
	case TW_CL_DEVICE_ID_9K_SA:
		switch(bar_type) {
		case TW_CL_BAR_TYPE_IO:
			*bar_num = 2;
			*bar0_offset = 0x10;
			*bar_size = 4;
			break;

		case TW_CL_BAR_TYPE_MEM:
			*bar_num = 1;
			*bar0_offset = 0x8;
			*bar_size = 8;
			break;

		case TW_CL_BAR_TYPE_SBUF:
			*bar_num = 0;
			*bar0_offset = 0;
			*bar_size = 8;
			break;
		}
		break;

	default:
		error = TW_OSL_ENOTTY;
		break;
	}

	return(error);
}
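
/*
 * Illustrative usage sketch (not part of the driver): before mapping the
 * controller's register window, a hypothetical OSL attach routine could
 * ask the Common Layer which BAR to use.  Only tw_cl_get_pci_bar_info(),
 * TW_CL_BAR_TYPE_MEM and the TW_OSL_ error codes are real; everything
 * else here is invented for illustration.
 */
#if 0
static TW_INT32
example_osl_find_mem_bar(TW_INT32 device_id)
{
	TW_INT32	bar_num, bar0_offset, bar_size;

	if (tw_cl_get_pci_bar_info(device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))
		return(TW_OSL_ENOTTY);
	/*
	 * bar_num identifies the BAR to map; bar0_offset locates it
	 * relative to BAR 0 (offset 0x10 in PCI config space); bar_size
	 * can be used to sanity-check the mapping length.
	 */
	return(TW_OSL_ESUCCESS);
}
#endif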



/*
 * Function name:	tw_cl_get_mem_requirements
 * Description:		Provides info about Common Layer requirements for a
 *			controller, given the controller type (in 'device_id').
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous
 *					requests that the OS Layer expects
 *					the Common Layer to support
 *			max_aens -- maximum # of AENs to be supported
 * Output:		alignment -- alignment needed for all DMA'able
 *					buffers
 *			sg_size_factor -- every SG element should have a size
 *					that's a multiple of this number
 *			non_dma_mem_size -- # of bytes of memory needed for
 *					non-DMA purposes
 *			dma_mem_size -- # of bytes of DMA'able memory needed
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT32 flags, TW_INT32 device_id, TW_INT32 max_simult_reqs,
	TW_INT32 max_aens, TW_UINT32 *alignment, TW_UINT32 *sg_size_factor,
	TW_UINT32 *non_dma_mem_size, TW_UINT32 *dma_mem_size)
{
	if (device_id == 0)
		device_id = TW_CL_DEVICE_ID_9K;

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	*alignment = TWA_ALIGNMENT(device_id);
	*sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);

	/*
	 * Total non-DMA memory needed is the sum total of memory needed for
	 * the controller context, request packets (including the 1 needed for
	 * CL internal requests), and event packets.
	 */
	*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens);

	/*
	 * Total DMA'able memory needed is the sum total of memory needed for
	 * all command packets (including the 1 needed for CL internal
	 * requests), and memory needed to hold the payload for internal
	 * requests.
	 */
	*dma_mem_size = (sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs)) + (TW_CLI_SECTOR_SIZE);

	return(0);
}
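
/*
 * Illustrative usage sketch (not part of the driver): the OSL is expected
 * to call tw_cl_get_mem_requirements() first, allocate the reported
 * amounts with the reported alignment and SG size factor, and only then
 * call tw_cl_init_ctlr().  Only the tw_cl_ call and its parameters are
 * real; the wrapper below is invented for illustration.
 */
#if 0
static TW_INT32
example_osl_size_cl_memory(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT32 flags, TW_INT32 device_id, TW_INT32 max_simult_reqs,
	TW_INT32 max_aens)
{
	TW_UINT32	alignment, sg_size_factor;
	TW_UINT32	non_dma_mem_size, dma_mem_size;
	TW_INT32	error;

	error = tw_cl_get_mem_requirements(ctlr_handle, flags, device_id,
		max_simult_reqs, max_aens, &alignment, &sg_size_factor,
		&non_dma_mem_size, &dma_mem_size);
	if (error)
		return(error);
	/*
	 * The OSL would now allocate non_dma_mem_size bytes of regular
	 * memory and dma_mem_size bytes of DMA'able memory (honoring
	 * 'alignment' and 'sg_size_factor') before calling
	 * tw_cl_init_ctlr() with the resulting buffers.
	 */
	return(TW_OSL_ESUCCESS);
}
#endif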



/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AENs to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_RESET_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		max_simult_reqs);

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		max_simult_reqs);
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs);

	for (i = 0; i < max_simult_reqs; i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
	/*
	 * Disable interrupts. Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller active. */
	ctlr->active = TW_CL_TRUE;
	return(error);
}
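
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical OSL
 * attach path hands the buffers sized by tw_cl_get_mem_requirements() to
 * tw_cl_init_ctlr().  To restart an already initialized controller (for
 * example on resume), the OSL would instead set TW_CL_START_CTLR_ONLY in
 * 'flags' and reuse its existing allocations.  Only the tw_cl_ call and
 * the flag are real; the wrapper is invented for illustration.
 */
#if 0
static TW_INT32
example_osl_attach_ctlr(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT32 flags, TW_INT32 device_id, TW_INT32 max_simult_reqs,
	TW_INT32 max_aens, TW_VOID *non_dma_mem, TW_VOID *dma_mem,
	TW_UINT64 dma_mem_phys)
{
	/* First-time initialization: build CL structures and start. */
	return(tw_cl_init_ctlr(ctlr_handle, flags, device_id,
		max_simult_reqs, max_aens, non_dma_mem, dma_mem,
		dma_mem_phys));
}
#endif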

/*
 * Function name:	tw_cli_start_ctlr
 * Description:		Establishes a logical connection with the controller.
 *			Determines whether or not the driver is compatible
 *			with the firmware on the controller, before proceeding
 *			to work with it.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_start_ctlr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT16	fw_on_ctlr_srl = 0;
	TW_UINT16	fw_on_ctlr_arch_id = 0;
	TW_UINT16	fw_on_ctlr_branch = 0;
	TW_UINT16	fw_on_ctlr_build = 0;
	TW_UINT32	init_connect_result = 0;
	TW_INT32	error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Wait for the controller to become ready. */
	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY,
			TW_CLI_REQUEST_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1009, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Microcontroller not ready",
			"error = %d", error);
		return(error);
	}

	/* Drain the response queue. */
	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue",
			"error = %d", error);
		return(error);
	}

	/* Establish a logical connection with the controller. */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
			(TW_UINT16)(ctlr->arch_id),
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id),
			TWA_CURRENT_FW_BUILD(ctlr->arch_id),
			&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
			&fw_on_ctlr_branch, &fw_on_ctlr_build,
			&init_connect_result))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100B, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't initialize connection in current mode",
			"error = %d", error);
		return(error);
	}

	{
		/*
		 * See if we can at least work with the firmware on the
		 * controller in the current mode.
		 */
		if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
			/* Yes, we can. Make note of the operating mode. */
			if (init_connect_result & TWA_CTLR_FW_SAME_OR_NEWER) {
				ctlr->working_srl = TWA_CURRENT_FW_SRL;
				ctlr->working_branch =
					TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
				ctlr->working_build =
					TWA_CURRENT_FW_BUILD(ctlr->arch_id);
			} else {
				ctlr->working_srl = fw_on_ctlr_srl;
				ctlr->working_branch = fw_on_ctlr_branch;
				ctlr->working_build = fw_on_ctlr_build;
			}
		} else {
			/*
			 * No, we can't. See if we can at least work with
			 * it in the base mode.
			 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1010, 0x2, TW_CL_SEVERITY_WARNING_STRING,
				"Driver/Firmware mismatch. "
				"Negotiating for base level...",
				" ");
			if ((error = tw_cli_init_connection(ctlr,
					(TW_UINT16)(ctlr->max_simult_reqs),
					TWA_EXTENDED_INIT_CONNECT,
					TWA_BASE_FW_SRL,
					(TW_UINT16)(ctlr->arch_id),
					TWA_BASE_FW_BRANCH, TWA_BASE_FW_BUILD,
					&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
					&fw_on_ctlr_branch, &fw_on_ctlr_build,
					&init_connect_result))) {
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1011, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Can't initialize connection in "
					"base mode",
					" ");
				return(error);
			}
			if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
				/*
				 * The firmware on the controller is not even
				 * compatible with our base mode. We cannot
				 * work with it. Bail...
				 */
				return(1);
			}
			/*
			 * We can work with this firmware, but only in
			 * base mode.
			 */
			ctlr->working_srl = TWA_BASE_FW_SRL;
			ctlr->working_branch = TWA_BASE_FW_BRANCH;
			ctlr->working_build = TWA_BASE_FW_BUILD;
			ctlr->operating_mode = TWA_BASE_MODE;
		}
		ctlr->fw_on_ctlr_srl = fw_on_ctlr_srl;
		ctlr->fw_on_ctlr_branch = fw_on_ctlr_branch;
		ctlr->fw_on_ctlr_build = fw_on_ctlr_build;
	}

	/* Drain the AEN queue. */
	if ((error = tw_cli_drain_aen_queue(ctlr)))
		/*
		 * We will just print that we couldn't drain the AEN queue.
		 * There's no need to bail out.
		 */
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1014, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't drain AEN queue",
			"error = %d", error);

	/* Enable interrupts. */
	tw_cli_enable_interrupts(ctlr);

	return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cl_shutdown_ctlr
 * Description:		Closes logical connection with the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_INT32			error;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");
	/*
	 * Mark the controller as inactive, disable any further interrupts,
	 * and notify the controller that we are going down.
	 */
	ctlr->active = TW_CL_FALSE;

	tw_cli_disable_interrupts(ctlr);

	/* Let the controller know that we are going down. */
	if ((error = tw_cli_init_connection(ctlr, TWA_SHUTDOWN_MESSAGE_CREDITS,
			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
			TW_CL_NULL, TW_CL_NULL)))
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1015, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't close connection with controller",
			"error = %d", error);

	if (flags & TW_CL_STOP_CTLR_ONLY)
		goto ret;

	/* Destroy all locks used by CL. */
	tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
	tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);

ret:
	return(error);
}
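
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical OSL
 * detach path calls tw_cl_shutdown_ctlr() with flags == 0, which both
 * closes the connection and destroys the CL locks.  Passing
 * TW_CL_STOP_CTLR_ONLY instead (for example on suspend) skips the lock
 * teardown, so the controller can later be restarted via tw_cl_init_ctlr()
 * with TW_CL_START_CTLR_ONLY.  Only the tw_cl_ names are real; the wrapper
 * is invented for illustration.
 */
#if 0
static TW_INT32
example_osl_detach_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	return(tw_cl_shutdown_ctlr(ctlr_handle, 0));
}
#endif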



/*
 * Function name:	tw_cli_init_connection
 * Description:		Sends init_connection cmd to firmware
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			message_credits -- max # of requests that we might
 *					send down simultaneously.  This will
 *					typically be set to 256 at init-time
 *					or after a reset, and to 1 at
 *					shutdown-time
 *			set_features -- indicates if we intend to use 64-bit
 *					sg, also indicates if we want to do a
 *					basic or an extended init_connection
 *
 * Note: The following input/output parameters are valid only in case of an
 * extended init_connection:
 *
 *			current_fw_srl -- srl of fw we are bundled
 *					with, if any; 0 otherwise
 *			current_fw_arch_id -- arch_id of fw we are bundled
 *					with, if any; 0 otherwise
 *			current_fw_branch -- branch # of fw we are bundled
 *					with, if any; 0 otherwise
 *			current_fw_build -- build # of fw we are bundled
 *					with, if any; 0 otherwise
 * Output:		fw_on_ctlr_srl -- srl of fw on ctlr
 *			fw_on_ctlr_arch_id -- arch_id of fw on ctlr
 *			fw_on_ctlr_branch -- branch # of fw on ctlr
 *			fw_on_ctlr_build -- build # of fw on ctlr
 *			init_connect_result -- result bitmap of fw response
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
	TW_UINT16 message_credits, TW_UINT32 set_features,
	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
	TW_UINT32 *init_connect_result)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_init_connect	*init_connect;
	TW_INT32				error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build the cmd pkt. */
	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	init_connect->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
	init_connect->request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	init_connect->message_credits = TW_CL_SWAP16(message_credits);
	init_connect->features = TW_CL_SWAP32(set_features);
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		init_connect->features |= TW_CL_SWAP32(TWA_64BIT_SG_ADDRESSES);
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Fill in the extra fields needed for an extended
		 * init_connect.
		 */
		init_connect->size = 6;
		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
	} else
		init_connect->size = 3;

	/* Submit the command, and wait for it to complete. */
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error)
		goto out;
	if ((error = init_connect->status)) {
#if 0
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
#endif // 0
		goto out;
	}
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
		*init_connect_result = TW_CL_SWAP32(init_connect->result);
	}
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"init_connection failed",
		"error = %d", error);
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}