FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_queue.h
1 /*-
2 * CAM request queue management definitions.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31 #ifndef _CAM_CAM_QUEUE_H
32 #define _CAM_CAM_QUEUE_H 1
33
34 #ifdef _KERNEL
35
36 #include <sys/queue.h>
37 #include <cam/cam.h>
38
39 /*
40 * This structure implements a heap based priority queue. The queue
41 * assumes that the objects stored in it begin with a cam_qentry
42 * structure holding the priority information used to sort the objects.
43 * This structure is opaque to clients (outside of the XPT layer) to allow
44 * the implementation to change without affecting them.
45 */
46 struct camq {
47 cam_pinfo **queue_array; /* heap array; slot CAMQ_HEAD (1) is the root */
48 int array_size; /* allocated capacity of queue_array */
49 int entries; /* number of entries currently in the heap */
50 u_int32_t generation; /* NOTE(review): presumably bumped on queue modification — confirm in cam_queue.c */
51 u_int32_t qfrozen_cnt[CAM_RL_VALUES]; /* freeze count, indexed by run level */
52 };
53
/* Convenience list-head types for linking struct ccb_hdr objects. */
54 TAILQ_HEAD(ccb_hdr_tailq, ccb_hdr);
55 LIST_HEAD(ccb_hdr_list, ccb_hdr);
56 SLIST_HEAD(ccb_hdr_slist, ccb_hdr);
57
/*
 * Per-device CCB queue: a priority queue of pending CCBs plus the
 * opening/active accounting manipulated by the inline helpers below.
 */
58 struct cam_ccbq {
59 struct camq queue; /* pending CCBs, ordered by priority */
60 int devq_openings; /* device-queue openings available (see take/release_opening) */
61 int dev_openings; /* device transaction slots free (see send_ccb/ccb_done) */
62 int dev_active; /* CCBs currently active on the device */
63 int held; /* openings reserved but whose CCB is not yet queued */
64 };
65
66 struct cam_ed;
67
/*
 * Queues and accounting shared by all devices on a SIM.
 * NOTE(review): field semantics inferred from names; confirm in cam_queue.c.
 */
68 struct cam_devq {
69 struct camq alloc_queue; /* presumably devices waiting to allocate CCBs */
70 struct camq send_queue; /* presumably devices with CCBs ready to send */
71 struct cam_ed *active_dev; /* device currently being serviced, if any */
72 int alloc_openings;
73 int alloc_active;
74 int send_openings;
75 int send_active;
76 };
77
78
79 struct cam_devq *cam_devq_alloc(int devices, int openings);
80
81 int cam_devq_init(struct cam_devq *devq, int devices,
82 int openings);
83
84 void cam_devq_free(struct cam_devq *devq);
85
86 u_int32_t cam_devq_resize(struct cam_devq *camq, int openings);
87
88 /*
89 * Allocate a cam_ccb_queue structure and initialize it.
90 */
91 struct cam_ccbq *cam_ccbq_alloc(int openings);
92
93 u_int32_t cam_ccbq_resize(struct cam_ccbq *ccbq, int devices);
94
95 int cam_ccbq_init(struct cam_ccbq *ccbq, int openings);
96
97 void cam_ccbq_free(struct cam_ccbq *ccbq);
98
99 void cam_ccbq_fini(struct cam_ccbq *ccbq);
100
101 /*
102 * Allocate and initialize a cam_queue structure.
103 */
104 struct camq *camq_alloc(int size);
105
106 /*
107 * Resize a cam queue
108 */
109 u_int32_t camq_resize(struct camq *queue, int new_size);
110
111 /*
112 * Initialize a camq structure. Return 0 on success, 1 on failure.
113 */
114 int camq_init(struct camq *camq, int size);
115
116 /*
117 * Free a cam_queue structure. This should only be called if a controller
118 * driver fails somehow during its attach routine or is unloaded and has
119 * obtained a cam_queue structure.
120 */
121 void camq_free(struct camq *queue);
122
123 /*
124 * Finalize any internal storage or state of a cam_queue.
125 */
126 void camq_fini(struct camq *queue);
127
128 /*
129 * camq_insert: Given a CAM queue with at least one open spot,
130 * insert the new entry maintaining order.
131 */
132 void camq_insert(struct camq *queue, cam_pinfo *new_entry);
133
134 /*
135 * camq_remove: Remove an arbitrary entry from the queue maintaining
136 * queue order.
137 */
138 cam_pinfo *camq_remove(struct camq *queue, int index);
139 #define CAMQ_HEAD 1 /* Head of queue index */
140
141 /* Index the first element in the heap */
142 #define CAMQ_GET_HEAD(camq) ((camq)->queue_array[CAMQ_HEAD])
143
144 /* Get the first element priority. */
145 #define CAMQ_GET_PRIO(camq) (((camq)->entries > 0) ? \
146 ((camq)->queue_array[CAMQ_HEAD]->priority) : 0)
147
148 /*
149 * camq_change_priority: Raise or lower the priority of an entry
150 * maintaining queue order.
151 */
152 void camq_change_priority(struct camq *queue, int index,
153 u_int32_t new_priority);
154
155 static __inline int
156 cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq);
157
158 static __inline void
159 cam_ccbq_take_opening(struct cam_ccbq *ccbq);
160
161 static __inline int
162 cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb);
163
164 static __inline int
165 cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb);
166
167 static __inline union ccb *
168 cam_ccbq_peek_ccb(struct cam_ccbq *ccbq, int index);
169
170 static __inline void
171 cam_ccbq_send_ccb(struct cam_ccbq *queue, union ccb *send_ccb);
172
173 static __inline void
174 cam_ccbq_ccb_done(struct cam_ccbq *ccbq, union ccb *done_ccb);
175
176 static __inline void
177 cam_ccbq_release_opening(struct cam_ccbq *ccbq);
178
179
180 static __inline int
181 cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq)
182 {
183 return (ccbq->queue.entries);
184 }
185
186 static __inline void
187 cam_ccbq_take_opening(struct cam_ccbq *ccbq)
188 {
189 ccbq->devq_openings--;
190 ccbq->held++;
191 }
192
/*
 * Queue new_ccb on the ccbq's priority queue.  Returns 1 if the CCB's
 * run level is currently frozen (it will not be drained yet), else 0.
 * For a frozen insert, the devq opening is given back and re-held so the
 * accounting matches a later thaw (cf. cam_ccbq_freeze/release).
 */
193 static __inline int
194 cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb)
195 {
/* The opening reserved by cam_ccbq_take_opening() is now consumed. */
196 ccbq->held--;
197 camq_insert(&ccbq->queue, &new_ccb->ccb_h.pinfo);
198 if (ccbq->queue.qfrozen_cnt[CAM_PRIORITY_TO_RL(
199 new_ccb->ccb_h.pinfo.priority)] > 0) {
/* Run level frozen: return the opening and hold it again. */
200 ccbq->devq_openings++;
201 ccbq->held++;
202 return (1);
203 } else
204 return (0);
205 }
206
/*
 * Remove ccb from the ccbq's priority queue.  Returns 1 if the CCB's
 * run level was frozen — in that case the extra devq_openings/held
 * credits added at (frozen) insert time are taken back — else 0.
 */
207 static __inline int
208 cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb)
209 {
210 camq_remove(&ccbq->queue, ccb->ccb_h.pinfo.index);
211 if (ccbq->queue.qfrozen_cnt[CAM_PRIORITY_TO_RL(
212 ccb->ccb_h.pinfo.priority)] > 0) {
/* Undo the frozen-insert compensation from cam_ccbq_insert_ccb(). */
213 ccbq->devq_openings--;
214 ccbq->held--;
215 return (1);
216 } else
217 return (0);
218 }
219
220 static __inline union ccb *
221 cam_ccbq_peek_ccb(struct cam_ccbq *ccbq, int index)
222 {
223 return((union ccb *)ccbq->queue.queue_array[index]);
224 }
225
226 static __inline void
227 cam_ccbq_send_ccb(struct cam_ccbq *ccbq, union ccb *send_ccb)
228 {
229
230 send_ccb->ccb_h.pinfo.index = CAM_ACTIVE_INDEX;
231 ccbq->dev_active++;
232 ccbq->dev_openings--;
233 }
234
/*
 * Account for the completion of done_ccb: one less active command, one
 * more device opening.  done_ccb itself is not inspected here.
 */
235 static __inline void
236 cam_ccbq_ccb_done(struct cam_ccbq *ccbq, union ccb *done_ccb)
237 {
238
239 ccbq->dev_active--;
240 ccbq->dev_openings++;
/*
 * NOTE(review): the freed opening is parked in 'held' — presumably the
 * caller pairs this with cam_ccbq_release_opening(); confirm in xpt code.
 */
241 ccbq->held++;
242 }
243
244 static __inline void
245 cam_ccbq_release_opening(struct cam_ccbq *ccbq)
246 {
247 ccbq->held--;
248 ccbq->devq_openings++;
249 }
250
/*
 * Freeze all run levels >= rl by cnt.  Returns the number of queued
 * CCBs that became frozen as a result; for each one, devq_openings and
 * held are credited so the frozen entries do not consume openings.
 */
251 static __inline int
252 cam_ccbq_freeze(struct cam_ccbq *ccbq, cam_rl rl, u_int32_t cnt)
253 {
254 int i, frozen = 0;
255 cam_rl p, n;
256
/* p = lowest run level frozen before this call (CAM_RL_VALUES if none). */
257 /* Find previous run level. */
258 for (p = 0; p < CAM_RL_VALUES && ccbq->queue.qfrozen_cnt[p] == 0; p++);
/* n = lowest run level frozen after this call. */
259 /* Find new run level. */
260 n = min(rl, p);
261 /* Apply new run level. */
262 for (i = rl; i < CAM_RL_VALUES; i++)
263 ccbq->queue.qfrozen_cnt[i] += cnt;
/* If the frozen range did not extend lower, no entries changed state. */
264 /* Update ccbq statistics. */
265 if (n == p)
266 return (0);
/* Credit every queued entry whose run level is newly frozen: n <= rrl < p. */
267 for (i = CAMQ_HEAD; i <= ccbq->queue.entries; i++) {
268 cam_rl rrl =
269 CAM_PRIORITY_TO_RL(ccbq->queue.queue_array[i]->priority);
270 if (rrl < n)
271 continue;
/* NOTE(review): early break assumes heap-array ordering suffices — confirm. */
272 if (rrl >= p)
273 break;
274 ccbq->devq_openings++;
275 ccbq->held++;
276 frozen++;
277 }
278 return (frozen);
279 }
280
/*
 * Thaw run levels >= rl by cnt (inverse of cam_ccbq_freeze).  Returns
 * the number of queued CCBs that became runnable again; for each one the
 * devq_openings/held credits added when they froze are taken back.
 */
281 static __inline int
282 cam_ccbq_release(struct cam_ccbq *ccbq, cam_rl rl, u_int32_t cnt)
283 {
284 int i, released = 0;
285 cam_rl p, n;
286
287 /* Apply new run level. */
288 for (i = rl; i < CAM_RL_VALUES; i++)
289 ccbq->queue.qfrozen_cnt[i] -= cnt;
/* n = lowest run level still frozen after the decrement (CAM_RL_VALUES if none). */
290 /* Find new run level. */
291 for (n = 0; n < CAM_RL_VALUES && ccbq->queue.qfrozen_cnt[n] == 0; n++);
/* p = lowest run level that was frozen before this call. */
292 /* Find previous run level. */
293 p = min(rl, n);
/* If the frozen range did not shrink, no entries changed state. */
294 /* Update ccbq statistics. */
295 if (n == p)
296 return (0);
/* Debit every queued entry whose run level thawed: p <= rrl < n. */
297 for (i = CAMQ_HEAD; i <= ccbq->queue.entries; i++) {
298 cam_rl rrl =
299 CAM_PRIORITY_TO_RL(ccbq->queue.queue_array[i]->priority);
300 if (rrl < p)
301 continue;
/* NOTE(review): early break assumes heap-array ordering suffices — confirm. */
302 if (rrl >= n)
303 break;
304 ccbq->devq_openings--;
305 ccbq->held--;
306 released++;
307 }
308 return (released);
309 }
310
311 static __inline u_int32_t
312 cam_ccbq_frozen(struct cam_ccbq *ccbq, cam_rl rl)
313 {
314
315 return (ccbq->queue.qfrozen_cnt[rl]);
316 }
317
318 static __inline u_int32_t
319 cam_ccbq_frozen_top(struct cam_ccbq *ccbq)
320 {
321 cam_rl rl;
322
323 rl = CAM_PRIORITY_TO_RL(CAMQ_GET_PRIO(&ccbq->queue));
324 return (ccbq->queue.qfrozen_cnt[rl]);
325 }
326
327 #endif /* _KERNEL */
328 #endif /* _CAM_CAM_QUEUE_H */
Cache object: deede729fea3f3af7b8ed7101a94b99c
|