FreeBSD/Linux Kernel Cross Reference
sys/dev/twa/tw_cl.h
1 /*
2 * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/6.1/sys/dev/twa/tw_cl.h 153207 2005-12-07 18:18:06Z vkashyap $
28 */
29
30 /*
31 * AMCC'S 3ware driver for 9000 series storage controllers.
32 *
33 * Author: Vinod Kashyap
34 */
35
36
37
38 #ifndef TW_CL_H
39
40 #define TW_CL_H
41
42
43 /*
44 * Common Layer internal macros, structures and functions.
45 */
46
47
/* Disk sector size, in bytes (512). */
#define TW_CLI_SECTOR_SIZE 0x200
/* Time an outstanding request is allowed before it is considered timed out. */
#define TW_CLI_REQUEST_TIMEOUT_PERIOD 60 /* seconds */
/* Time allowed for a controller reset to complete. */
#define TW_CLI_RESET_TIMEOUT_PERIOD 60 /* seconds */
/* Number of times a controller reset is attempted before giving up. */
#define TW_CLI_MAX_RESET_ATTEMPTS 2

#ifdef TW_OSL_FLASH_FIRMWARE
/* Number of chunks the fw image is broken into, while flashing. */
#define TW_CLI_NUM_FW_IMAGE_CHUNKS 500
#endif /* TW_OSL_FLASH_FIRMWARE */

/* Possible values of ctlr->state (bit flags; may be OR'd together). */
/* Initialization done, and controller is active. */
#define TW_CLI_CTLR_STATE_ACTIVE (1<<0)
/* Interrupts on controller enabled. */
#define TW_CLI_CTLR_STATE_INTR_ENABLED (1<<1)
/* Data buffer for internal requests in use. */
#define TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY (1<<2)
/* More AEN's need to be retrieved. */
#define TW_CLI_CTLR_STATE_GET_MORE_AENS (1<<3)
/* Controller is being reset. */
#define TW_CLI_CTLR_STATE_RESET_IN_PROGRESS (1<<4)
/* G133 controller is in 'phase 1' of being reset. */
#define TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS (1<<5)
/* G66 register write access bug needs to be worked around. */
#define TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED (1<<6)

/* Possible values of ctlr->ioctl_lock.lock. */
#define TW_CLI_LOCK_FREE 0x0 /* lock is free */
#define TW_CLI_LOCK_HELD 0x1 /* lock is held */

/* Possible values of req->state (request lifecycle states). */
#define TW_CLI_REQ_STATE_INIT 0x0 /* being initialized */
#define TW_CLI_REQ_STATE_BUSY 0x1 /* submitted to controller */
#define TW_CLI_REQ_STATE_PENDING 0x2 /* in pending queue */
#define TW_CLI_REQ_STATE_COMPLETE 0x3 /* completed by controller */

/* Possible values of req->flags (bit flags; may be OR'd together). */
#define TW_CLI_REQ_FLAGS_7K (1<<0) /* 7000 cmd pkt */
#define TW_CLI_REQ_FLAGS_9K (1<<1) /* 9000 cmd pkt */
#define TW_CLI_REQ_FLAGS_INTERNAL (1<<2) /* internal request */
#define TW_CLI_REQ_FLAGS_PASSTHRU (1<<3) /* passthru request */
#define TW_CLI_REQ_FLAGS_EXTERNAL (1<<4) /* external request */

#ifdef TW_OSL_PCI_CONFIG_ACCESSIBLE
/* Register offsets in PCI config space. */
#define TW_CLI_PCI_CONFIG_COMMAND_OFFSET 0x4 /* cmd register offset */
#define TW_CLI_PCI_CONFIG_STATUS_OFFSET 0x6 /* status register offset */
#endif /* TW_OSL_PCI_CONFIG_ACCESSIBLE */
96
/* Byte-align (pack) the structures that follow; no compiler padding. */
#pragma pack(1)

#ifdef TW_OSL_DEBUG
/* Per-queue occupancy statistics; maintained only in debug builds. */
struct tw_cli_q_stats {
	TW_UINT32 cur_len; /* current # of entries in q */
	TW_UINT32 max_len; /* max # of entries in q, ever reached */
};
#endif /* TW_OSL_DEBUG */
105
106
/*
 * Queues of CL internal request context packets.  These values index
 * ctlr->req_q_head[] (and, in debug builds, ctlr->q_stats[]).
 */
#define TW_CLI_FREE_Q 0 /* free q */
#define TW_CLI_BUSY_Q 1 /* q of reqs submitted to fw */
#define TW_CLI_PENDING_Q 2 /* q of reqs deferred due to 'q full' */
#define TW_CLI_COMPLETE_Q 3 /* q of reqs completed by fw */
#define TW_CLI_Q_COUNT 4 /* total number of queues */
113
114
/*
 * CL's internal request context.  One of these tracks each request from
 * submission through completion, tying the OSL's request handle to the
 * controller command packet and the CL-internal completion callback.
 */
struct tw_cli_req_context {
	struct tw_cl_req_handle *req_handle;/* handle to track requests between
						OSL & CL */
	struct tw_cli_ctlr_context *ctlr; /* ptr to CL's controller context */
	struct tw_cl_command_packet *cmd_pkt;/* ptr to ctlr cmd pkt */
	TW_UINT64 cmd_pkt_phys; /* cmd pkt physical address */
	TW_VOID *data; /* ptr to data being passed to fw */
	TW_UINT32 length; /* length of data being passed to fw */
	TW_UINT64 data_phys; /* physical address of data */

	TW_UINT32 state; /* request state (TW_CLI_REQ_STATE_*) */
	TW_UINT32 flags; /* request flags (TW_CLI_REQ_FLAGS_*) */

	TW_UINT32 error_code; /* error encountered before submission
				of request to fw, if any */

	TW_VOID *orig_req; /* ptr to original request for use
				during callback */
	TW_VOID (*tw_cli_callback)(struct tw_cli_req_context *req);
				/* CL internal callback, invoked on
				completion of the request */
	TW_UINT32 request_id; /* request id for tracking with fw */
	struct tw_cl_link link; /* to link this request in a list */
};
139
140
/*
 * CL's internal controller context.  One per controller; holds the
 * request-context and command-packet pools, request queue heads, the
 * AEN queue, driver/firmware compatibility info, pending-interrupt
 * flags and the locks used by the queue primitives below.
 */
struct tw_cli_ctlr_context {
	struct tw_cl_ctlr_handle *ctlr_handle; /* handle to track ctlr between
						OSL & CL. */
	struct tw_cli_req_context *req_ctxt_buf;/* pointer to the array of CL's
						internal request context pkts */

#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST

	TW_UINT32 free_req_ids[TW_CL_MAX_SIMULTANEOUS_REQUESTS];
			/* circular ring of free req_id's */
	struct tw_cli_req_context *busy_reqs[TW_CL_MAX_SIMULTANEOUS_REQUESTS + 1];
			/* busy reqs, indexed by req_id */
	TW_UINT32 free_req_head; /* ring index ids are taken from --
			presumably by the id allocator; see insert_tail */
	TW_UINT32 free_req_tail; /* ring index freed ids are returned to */
	TW_UINT32 num_free_req_ids; /* # of req_id's currently free */

#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	struct tw_cl_command_packet *cmd_pkt_buf;/* ptr to array of cmd pkts */

	TW_UINT64 cmd_pkt_phys; /* phys addr of cmd_pkt_buf */

	TW_UINT32 device_id; /* controller device id */
	TW_UINT32 arch_id; /* controller architecture id */
	TW_UINT32 state; /* controller state (TW_CLI_CTLR_STATE_*) */
	TW_UINT32 flags; /* controller settings (e.g.
				TW_CL_DEFERRED_INTR_USED) */
	TW_UINT32 sg_size_factor; /* SG element size should be a
				multiple of this */

	/* Request queue heads, indexed by TW_CLI_FREE_Q..TW_CLI_COMPLETE_Q. */
	struct tw_cl_link req_q_head[TW_CLI_Q_COUNT];

#ifdef TW_OSL_FLASH_FIRMWARE
	TW_VOID *flash_dma_mem; /* mem for flashing fw image */
	TW_UINT64 flash_dma_mem_phys;/* flash_dma_mem phys addr */
#endif /* TW_OSL_FLASH_FIRMWARE */

	TW_UINT8 *internal_req_data;/* internal req data buf */
	TW_UINT64 internal_req_data_phys;/* phys addr of internal
				req data buf */
	TW_UINT32 max_simult_reqs; /* max simultaneous requests
				supported */
	TW_UINT32 max_aens_supported;/* max AEN's supported */

	/* AEN handler fields. */
	struct tw_cl_event_packet *aen_queue; /* circular queue of AENs from
				firmware/CL/OSL */
	TW_UINT32 aen_head; /* AEN queue head */
	TW_UINT32 aen_tail; /* AEN queue tail */
	TW_UINT32 aen_cur_seq_id; /* index of the last event+1 */
	TW_UINT32 aen_q_overflow; /* indicates if unretrieved
				events were overwritten */
	TW_UINT32 aen_q_wrapped; /* indicates if AEN queue ever
				wrapped */

	/* Driver/firmware compatibility information. */
	TW_UINT16 working_srl; /* driver & firmware negotiated
				srl */
	TW_UINT16 working_branch; /* branch # of the firmware
				that the driver is compatible with */
	TW_UINT16 working_build; /* build # of the firmware
				that the driver is compatible with */
	TW_UINT16 fw_on_ctlr_srl; /* srl of running firmware */
	TW_UINT16 fw_on_ctlr_branch;/* branch # of running
				firmware */
	TW_UINT16 fw_on_ctlr_build;/* build # of running
				firmware */
	TW_UINT32 operating_mode; /* base mode/current mode */

	/* Flags noting which kinds of interrupt processing are pending. */
	TW_INT32 host_intr_pending;/* host intr processing
				needed */
	TW_INT32 attn_intr_pending;/* attn intr processing
				needed */
	TW_INT32 cmd_intr_pending;/* cmd intr processing
				needed */
	TW_INT32 resp_intr_pending;/* resp intr processing
				needed */

	TW_LOCK_HANDLE gen_lock_handle;/* general purpose lock */
	TW_LOCK_HANDLE *gen_lock;/* ptr to general purpose lock;
				guards the request queues */
	TW_LOCK_HANDLE io_lock_handle; /* lock held during cmd
				submission */
	TW_LOCK_HANDLE *io_lock;/* ptr to lock held during cmd
				submission */
	TW_LOCK_HANDLE intr_lock_handle;/* lock held during
				ISR/response intr processing */
	TW_LOCK_HANDLE *intr_lock;/* ptr to lock held during ISR/
				response intr processing */

#ifdef TW_OSL_CAN_SLEEP
	TW_SLEEP_HANDLE sleep_handle; /* handle to co-ordinate sleeps
				& wakeups */
#endif /* TW_OSL_CAN_SLEEP */

	struct {
		TW_UINT32 lock; /* lock state (TW_CLI_LOCK_FREE/HELD) */
		TW_TIME timeout; /* time at which the lock will
				become available, even if not
				explicitly released */
	} ioctl_lock; /* lock for use by user applications, for
			synchronization between ioctl calls */
#ifdef TW_OSL_DEBUG
	struct tw_cli_q_stats q_stats[TW_CLI_Q_COUNT];/* queue statistics */
#endif /* TW_OSL_DEBUG */
};

#pragma pack()
247
248
/*
 * Queue statistics primitives.
 *
 * In debug builds these maintain, per queue, the current occupancy and
 * the high-watermark in ctlr->q_stats[]; in non-debug builds they expand
 * to nothing.  Fix: the q_type parameter is now parenthesized at every
 * expansion site, so passing a compound expression cannot mis-bind.
 */

#ifdef TW_OSL_DEBUG

/* Reset the statistics of the given queue. */
#define TW_CLI_Q_INIT(ctlr, q_type) do { \
	(ctlr)->q_stats[(q_type)].cur_len = 0; \
	(ctlr)->q_stats[(q_type)].max_len = 0; \
} while (0)


/* Account for an insertion into the given queue; track the high-watermark. */
#define TW_CLI_Q_INSERT(ctlr, q_type) do { \
	struct tw_cli_q_stats *q_stats = &((ctlr)->q_stats[(q_type)]); \
	\
	if (++(q_stats->cur_len) > q_stats->max_len) \
		q_stats->max_len = q_stats->cur_len; \
} while (0)


/* Account for a removal from the given queue. */
#define TW_CLI_Q_REMOVE(ctlr, q_type) \
	(ctlr)->q_stats[(q_type)].cur_len--

#else /* TW_OSL_DEBUG */

#define TW_CLI_Q_INIT(ctlr, q_index)
#define TW_CLI_Q_INSERT(ctlr, q_index)
#define TW_CLI_Q_REMOVE(ctlr, q_index)

#endif /* TW_OSL_DEBUG */
279
280
281 /* Initialize a queue of requests. */
282 static __inline TW_VOID
283 tw_cli_req_q_init(struct tw_cli_ctlr_context *ctlr, TW_UINT8 q_type)
284 {
285 TW_CL_Q_INIT(&(ctlr->req_q_head[q_type]));
286 TW_CLI_Q_INIT(ctlr, q_type);
287 }
288
289
290
291 /* Insert the given request at the head of the given queue (q_type). */
292 static __inline TW_VOID
293 tw_cli_req_q_insert_head(struct tw_cli_req_context *req, TW_UINT8 q_type)
294 {
295 struct tw_cli_ctlr_context *ctlr = req->ctlr;
296
297 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
298 if ((q_type == TW_CLI_BUSY_Q) || (q_type == TW_CLI_COMPLETE_Q) ||
299 ((q_type == TW_CLI_PENDING_Q) &&
300 (!(req->flags & TW_CLI_REQ_FLAGS_INTERNAL))))
301 return;
302 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
303
304 tw_osl_get_lock(ctlr->ctlr_handle, ctlr->gen_lock);
305 TW_CL_Q_INSERT_HEAD(&(ctlr->req_q_head[q_type]), &(req->link));
306 TW_CLI_Q_INSERT(ctlr, q_type);
307 tw_osl_free_lock(ctlr->ctlr_handle, ctlr->gen_lock);
308 }
309
310
311
/*
 * Insert the given request at the tail of the given queue (q_type).
 *
 * When TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST is defined, the busy and
 * complete queues are not maintained, and the pending queue holds only
 * internal requests; freeing an external request instead returns its
 * request id to the free_req_ids ring under interrupt synchronization.
 */
static __inline TW_VOID
tw_cli_req_q_insert_tail(struct tw_cli_req_context *req, TW_UINT8 q_type)
{
	struct tw_cli_ctlr_context *ctlr = req->ctlr;

#ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
	/* Queues not maintained in this configuration: nothing to do. */
	if ((q_type == TW_CLI_BUSY_Q) || (q_type == TW_CLI_COMPLETE_Q) ||
		((q_type == TW_CLI_PENDING_Q) &&
		(!(req->flags & TW_CLI_REQ_FLAGS_INTERNAL))))
		return;
	/* Freeing an external request: recycle its request id rather than
	   linking the request on the free queue. */
	if ((q_type == TW_CLI_FREE_Q) &&
		(!(req->flags & TW_CLI_REQ_FLAGS_INTERNAL))) {
		TW_SYNC_HANDLE sync_handle;

		tw_osl_get_lock(ctlr->ctlr_handle, ctlr->gen_lock);
		/* Block the context that could race with the ring update:
		   deferred-intr handler for completed requests, the ISR
		   otherwise. */
		if (req->state == TW_CLI_REQ_STATE_COMPLETE) {
			if (ctlr->flags & TW_CL_DEFERRED_INTR_USED)
				tw_osl_sync_io_block(ctlr->ctlr_handle,
					&sync_handle);
		} else {
			if (!(ctlr->flags & TW_CL_DEFERRED_INTR_USED))
				tw_osl_sync_isr_block(ctlr->ctlr_handle,
					&sync_handle);
		}
		ctlr->free_req_ids[ctlr->free_req_tail] = req->request_id;
		ctlr->busy_reqs[req->request_id] = TW_CL_NULL;
		/* NOTE(review): the ring wraps at (max_simult_reqs - 1),
		   not max_simult_reqs -- presumably one request id is
		   reserved; confirm against the id-allocation code. */
		ctlr->free_req_tail = (ctlr->free_req_tail + 1) %
			(ctlr->max_simult_reqs - 1);
		ctlr->num_free_req_ids++;

		/* Unblock whichever context was blocked above. */
		if (req->state == TW_CLI_REQ_STATE_COMPLETE) {
			if (ctlr->flags & TW_CL_DEFERRED_INTR_USED)
				tw_osl_sync_io_unblock(ctlr->ctlr_handle,
					&sync_handle);
		} else {
			if (!(ctlr->flags & TW_CL_DEFERRED_INTR_USED))
				tw_osl_sync_isr_unblock(ctlr->ctlr_handle,
					&sync_handle);
		}
		tw_osl_free_lock(ctlr->ctlr_handle, ctlr->gen_lock);
		return;
	}
#endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */

	tw_osl_get_lock(ctlr->ctlr_handle, ctlr->gen_lock);
	TW_CL_Q_INSERT_TAIL(&(ctlr->req_q_head[q_type]), &(req->link));
	TW_CLI_Q_INSERT(ctlr, q_type);
	tw_osl_free_lock(ctlr->ctlr_handle, ctlr->gen_lock);
}
362
363
364
365 /* Remove and return the request at the head of the given queue (q_type). */
366 static __inline struct tw_cli_req_context *
367 tw_cli_req_q_remove_head(struct tw_cli_ctlr_context *ctlr, TW_UINT8 q_type)
368 {
369 struct tw_cli_req_context *req = TW_CL_NULL;
370 struct tw_cl_link *link;
371
372 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
373 if ((q_type == TW_CLI_BUSY_Q) || (q_type == TW_CLI_COMPLETE_Q))
374 return(req);
375 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
376
377 tw_osl_get_lock(ctlr->ctlr_handle, ctlr->gen_lock);
378 if ((link = TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[q_type]))) !=
379 TW_CL_NULL) {
380 req = TW_CL_STRUCT_HEAD(link,
381 struct tw_cli_req_context, link);
382 TW_CL_Q_REMOVE_ITEM(&(ctlr->req_q_head[q_type]), &(req->link));
383 TW_CLI_Q_REMOVE(ctlr, q_type);
384 }
385 tw_osl_free_lock(ctlr->ctlr_handle, ctlr->gen_lock);
386 return(req);
387 }
388
389
390
391 /* Remove the given request from the given queue (q_type). */
392 static __inline TW_VOID
393 tw_cli_req_q_remove_item(struct tw_cli_req_context *req, TW_UINT8 q_type)
394 {
395 struct tw_cli_ctlr_context *ctlr = req->ctlr;
396
397 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
398 if ((q_type == TW_CLI_BUSY_Q) || (q_type == TW_CLI_COMPLETE_Q) ||
399 ((q_type == TW_CLI_PENDING_Q) &&
400 (!(req->flags & TW_CLI_REQ_FLAGS_INTERNAL))))
401 return;
402 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
403
404 tw_osl_get_lock(ctlr->ctlr_handle, ctlr->gen_lock);
405 TW_CL_Q_REMOVE_ITEM(&(ctlr->req_q_head[q_type]), &(req->link));
406 TW_CLI_Q_REMOVE(ctlr, q_type);
407 tw_osl_free_lock(ctlr->ctlr_handle, ctlr->gen_lock);
408 }
409
410
411
/*
 * Create an event packet for an event/error posted by the controller.
 *
 * Extracts the severity from the command header's status block, reports
 * the event to the OSL via tw_cl_create_event(), and prints (at debug
 * level 2) 18 bytes of sense information from the header.
 *
 * NOTE(review): the error string argument is the text following the
 * first NUL in err_specific_desc -- presumably the header stores two
 * consecutive strings there; confirm against the firmware interface.
 */
#define tw_cli_create_ctlr_event(ctlr, event_src, cmd_hdr) do { \
	TW_UINT8 severity = \
		GET_SEVERITY((cmd_hdr)->status_block.res__severity); \
	\
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_TRUE, event_src, \
		(cmd_hdr)->status_block.error, \
		severity, \
		tw_cli_severity_string_table[severity], \
		(cmd_hdr)->err_specific_desc + \
		tw_osl_strlen((cmd_hdr)->err_specific_desc) + 1, \
		(cmd_hdr)->err_specific_desc); \
	/* Print 18 bytes of sense information. */ \
	tw_cli_dbg_printf(2, ctlr->ctlr_handle, \
		tw_osl_cur_func(), \
		"sense info: %x %x %x %x %x %x %x %x %x " \
		"%x %x %x %x %x %x %x %x %x", \
		(cmd_hdr)->sense_data[0], (cmd_hdr)->sense_data[1], \
		(cmd_hdr)->sense_data[2], (cmd_hdr)->sense_data[3], \
		(cmd_hdr)->sense_data[4], (cmd_hdr)->sense_data[5], \
		(cmd_hdr)->sense_data[6], (cmd_hdr)->sense_data[7], \
		(cmd_hdr)->sense_data[8], (cmd_hdr)->sense_data[9], \
		(cmd_hdr)->sense_data[10], (cmd_hdr)->sense_data[11], \
		(cmd_hdr)->sense_data[12], (cmd_hdr)->sense_data[13], \
		(cmd_hdr)->sense_data[14], (cmd_hdr)->sense_data[15], \
		(cmd_hdr)->sense_data[16], (cmd_hdr)->sense_data[17]); \
} while (0)
439
440
441
442 #endif /* TW_CL_H */
Cache object: c11dc0e40d99962627ec2477ca7fd971
|