/* FreeBSD/Linux Kernel Cross Reference: sys/kern/subr_rlist.c */
1 /*
2 * Copyright (c) 1992 William F. Jolitz, TeleMuse
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This software is a component of "386BSD" developed by
16 William F. Jolitz, TeleMuse.
17 * 4. Neither the name of the developer nor the name "386BSD"
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
22 * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
23 * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
24 * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
25 * NOT MAKE USE THIS WORK.
26 *
27 * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED
28 * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN
29 * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES
30 * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING
31 * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND
32 * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE
33 * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS
34 * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
48 */
49 /*
50 * Changes Copyright (C) 1995, David Greenman & John Dyson; This software may
51 * be used, modified, copied, distributed, and sold, in both source and
52 * binary form provided that the above copyright and these terms are
53 * retained. Under no circumstances is the author responsible for the proper
54 * functioning of this software, nor does the author assume any responsibility
55 * for damages incurred with its use.
56 *
57 * $FreeBSD: src/sys/kern/subr_rlist.c,v 1.19.2.3 1999/09/05 08:15:15 peter Exp $
58 */
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/malloc.h>
63 #include <sys/rlist.h>
64 #include <sys/proc.h>
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69
70 /*
71 * Resource lists.
72 */
73
74 #define RLIST_MIN 128
75 static int rlist_count=0;
76 static struct rlist *rlfree;
77
78 static struct rlist *rlist_malloc __P((void));
79
80 static struct rlist *
81 rlist_malloc()
82 {
83 struct rlist *rl;
84 int i;
85 while( rlist_count < RLIST_MIN) {
86 int s = splhigh();
87 rl = (struct rlist *)kmem_alloc(kernel_map, PAGE_SIZE);
88 splx(s);
89 if( !rl)
90 break;
91
92 for(i=0;i<(PAGE_SIZE/(sizeof *rl));i++) {
93 rl->rl_next = rlfree;
94 rlfree = rl;
95 rlist_count++;
96 rl++;
97 }
98 }
99
100 if( (rl = rlfree) == 0 )
101 panic("Cannot get an rlist entry");
102
103 --rlist_count;
104 rlfree = rl->rl_next;
105 return rl;
106 }
107
108 __inline static void
109 rlist_mfree( struct rlist *rl)
110 {
111 rl->rl_next = rlfree;
112 rlfree = rl;
113 ++rlist_count;
114 }
115
116 void
117 rlist_free(rlh, start, end)
118 struct rlisthdr *rlh;
119 u_int start, end;
120 {
121 struct rlist **rlp = &rlh->rlh_list;
122 struct rlist *prev_rlp = NULL, *cur_rlp, *next_rlp = NULL;
123 int s;
124
125 s = splhigh();
126 while (rlh->rlh_lock & RLH_LOCKED) {
127 rlh->rlh_lock |= RLH_DESIRED;
128 tsleep(rlh, PSWP, "rlistf", 0);
129 }
130 rlh->rlh_lock |= RLH_LOCKED;
131 splx(s);
132
133 /*
134 * Traverse the list looking for an entry after the one we want
135 * to insert.
136 */
137 cur_rlp = *rlp;
138 while (cur_rlp != NULL) {
139 if (start < cur_rlp->rl_start)
140 break;
141 #ifdef DIAGNOSTIC
142 if (prev_rlp) {
143 if (prev_rlp->rl_end + 1 == cur_rlp->rl_start)
144 panic("rlist_free: missed coalesce opportunity");
145 if (prev_rlp->rl_end == cur_rlp->rl_start)
146 panic("rlist_free: entries overlap");
147 if (prev_rlp->rl_end > cur_rlp->rl_start)
148 panic("entries out of order");
149 }
150 #endif
151 prev_rlp = cur_rlp;
152 cur_rlp = cur_rlp->rl_next;
153 }
154
155 if (cur_rlp != NULL) {
156
157 if (end >= cur_rlp->rl_start)
158 panic("rlist_free: free end overlaps already freed area");
159
160 if (prev_rlp) {
161 if (start <= prev_rlp->rl_end)
162 panic("rlist_free: free start overlaps already freed area");
163 /*
164 * Attempt to append
165 */
166 if (prev_rlp->rl_end + 1 == start) {
167 prev_rlp->rl_end = end;
168 /*
169 * Attempt to prepend and coalesce
170 */
171 if (end + 1 == cur_rlp->rl_start) {
172 prev_rlp->rl_end = cur_rlp->rl_end;
173 prev_rlp->rl_next = cur_rlp->rl_next;
174 rlist_mfree(cur_rlp);
175 }
176 goto done;
177 }
178 }
179 /*
180 * Attempt to prepend
181 */
182 if (end + 1 == cur_rlp->rl_start) {
183 cur_rlp->rl_start = start;
184 goto done;
185 }
186 }
187 /*
188 * Reached the end of the list without finding a larger entry.
189 * Append to last entry if there is one and it's adjacent.
190 */
191 if (prev_rlp) {
192 if (start <= prev_rlp->rl_end)
193 panic("rlist_free: free start overlaps already freed area at list tail");
194 /*
195 * Attempt to append
196 */
197 if (prev_rlp->rl_end + 1 == start) {
198 prev_rlp->rl_end = end;
199 goto done;
200 }
201 }
202
203 /*
204 * Could neither append nor prepend; allocate a new entry.
205 */
206 next_rlp = cur_rlp;
207 cur_rlp = rlist_malloc();
208 cur_rlp->rl_start = start;
209 cur_rlp->rl_end = end;
210 cur_rlp->rl_next = next_rlp;
211 if (prev_rlp) {
212 prev_rlp->rl_next = cur_rlp;
213 } else {
214 /*
215 * No previous - this entry is the new list head.
216 */
217 *rlp = cur_rlp;
218 }
219
220 done:
221 rlh->rlh_lock &= ~RLH_LOCKED;
222 if (rlh->rlh_lock & RLH_DESIRED) {
223 wakeup(rlh);
224 rlh->rlh_lock &= ~RLH_DESIRED;
225 }
226 return;
227 }
228
229 /*
230 * Obtain a region of desired size from a resource list.
231 * If nothing available of that size, return 0. Otherwise,
232 * return a value of 1 and set resource start location with
233 * "*loc". (Note: loc can be zero if we don't wish the value)
234 */
235 int
236 rlist_alloc (rlh, size, loc)
237 struct rlisthdr *rlh;
238 unsigned size, *loc;
239 {
240 struct rlist **rlp = &rlh->rlh_list;
241 register struct rlist *lp;
242 int s;
243 register struct rlist *olp = 0;
244
245 s = splhigh();
246 while (rlh->rlh_lock & RLH_LOCKED) {
247 rlh->rlh_lock |= RLH_DESIRED;
248 tsleep(rlh, PSWP, "rlistf", 0);
249 }
250 rlh->rlh_lock |= RLH_LOCKED;
251 splx(s);
252
253 /* walk list, allocating first thing that's big enough (first fit) */
254 for (; *rlp; rlp = &((*rlp)->rl_next))
255 if(size <= (*rlp)->rl_end - (*rlp)->rl_start + 1) {
256
257 /* hand it to the caller */
258 if (loc) *loc = (*rlp)->rl_start;
259 (*rlp)->rl_start += size;
260
261 /* did we eat this element entirely? */
262 if ((*rlp)->rl_start > (*rlp)->rl_end) {
263 lp = (*rlp)->rl_next;
264 rlist_mfree(*rlp);
265 /*
266 * if the deleted element was in fromt
267 * of the list, adjust *rlp, else don't.
268 */
269 if (olp) {
270 olp->rl_next = lp;
271 } else {
272 *rlp = lp;
273 }
274 }
275
276 rlh->rlh_lock &= ~RLH_LOCKED;
277 if (rlh->rlh_lock & RLH_DESIRED) {
278 wakeup(rlh);
279 rlh->rlh_lock &= ~RLH_DESIRED;
280 }
281 return (1);
282 } else {
283 olp = *rlp;
284 }
285
286 rlh->rlh_lock &= ~RLH_LOCKED;
287 if (rlh->rlh_lock & RLH_DESIRED) {
288 wakeup(rlh);
289 rlh->rlh_lock &= ~RLH_DESIRED;
290 }
291 /* nothing in list that's big enough */
292 return (0);
293 }
294
295 /*
296 * Finished with this resource list, reclaim all space and
297 * mark it as being empty.
298 */
299 void
300 rlist_destroy (rlh)
301 struct rlisthdr *rlh;
302 {
303 struct rlist **rlp = &rlh->rlh_list;
304 struct rlist *lp, *nlp;
305
306 lp = *rlp;
307 *rlp = 0;
308 for (; lp; lp = nlp) {
309 nlp = lp->rl_next;
310 rlist_mfree(lp);
311 }
312 }
/* Cache object: c82ac46398bb275a281c3e151bd4b656 */