/* FreeBSD/Linux Kernel Cross Reference: sys/kern/subr_rlist.c */
1 /*
2 * Copyright (c) 1992 William F. Jolitz, TeleMuse
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This software is a component of "386BSD" developed by
16 William F. Jolitz, TeleMuse.
17 * 4. Neither the name of the developer nor the name "386BSD"
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
22 * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
23 * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
24 * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
25 * NOT MAKE USE THIS WORK.
26 *
27 * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED
28 * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN
29 * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES
30 * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING
31 * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND
32 * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE
33 * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS
34 * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
48 */
49 /*
50 * Changes Copyright (C) 1995, David Greenman & John Dyson; This software may
51 * be used, modified, copied, distributed, and sold, in both source and
52 * binary form provided that the above copyright and these terms are
53 * retained. Under no circumstances is the author responsible for the proper
54 * functioning of this software, nor does the author assume any responsibility
55 * for damages incurred with its use.
56 *
57 * $FreeBSD$
58 */
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/rlist.h>
63 #include <vm/vm.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_extern.h>
66
67 /*
68 * Resource lists.
69 */
70
71 #define RLIST_MIN 128
72 static int rlist_count=0;
73 static struct rlist *rlfree;
74
75 static struct rlist *rlist_malloc __P((void));
76 static __inline void rlist_mfree __P((struct rlist *rl));
77
78 static struct rlist *
79 rlist_malloc()
80 {
81 struct rlist *rl;
82 int i;
83 while( rlist_count < RLIST_MIN) {
84 int s = splhigh();
85 rl = (struct rlist *)kmem_alloc(kernel_map, PAGE_SIZE);
86 splx(s);
87 if( !rl)
88 break;
89
90 for(i=0;i<(PAGE_SIZE/(sizeof *rl));i++) {
91 rl->rl_next = rlfree;
92 rlfree = rl;
93 rlist_count++;
94 rl++;
95 }
96 }
97
98 if( (rl = rlfree) == 0 )
99 panic("Cannot get an rlist entry");
100
101 --rlist_count;
102 rlfree = rl->rl_next;
103 return rl;
104 }
105
106 static __inline void
107 rlist_mfree(rl)
108 struct rlist *rl;
109 {
110 rl->rl_next = rlfree;
111 rlfree = rl;
112 ++rlist_count;
113 }
114
115 void
116 rlist_free(rlh, start, end)
117 struct rlisthdr *rlh;
118 u_int start, end;
119 {
120 struct rlist **rlp = &rlh->rlh_list;
121 struct rlist *prev_rlp = NULL, *cur_rlp, *next_rlp = NULL;
122 int s;
123
124 s = splhigh();
125 while (rlh->rlh_lock & RLH_LOCKED) {
126 rlh->rlh_lock |= RLH_DESIRED;
127 tsleep(rlh, PSWP, "rlistf", 0);
128 }
129 rlh->rlh_lock |= RLH_LOCKED;
130 splx(s);
131
132 /*
133 * Traverse the list looking for an entry after the one we want
134 * to insert.
135 */
136 cur_rlp = *rlp;
137 while (cur_rlp != NULL) {
138 if (start < cur_rlp->rl_start)
139 break;
140 if (prev_rlp) {
141 KASSERT(prev_rlp->rl_end + 1 != cur_rlp->rl_start,
142 ("rlist_free: missed coalesce opportunity"));
143 KASSERT(prev_rlp->rl_end != cur_rlp->rl_start,
144 ("rlist_free: entries overlap"));
145 KASSERT(prev_rlp->rl_end <= cur_rlp->rl_start,
146 ("entries out of order"));
147 }
148 prev_rlp = cur_rlp;
149 cur_rlp = cur_rlp->rl_next;
150 }
151
152 if (cur_rlp != NULL) {
153
154 if (end >= cur_rlp->rl_start)
155 panic("rlist_free: free end overlaps already freed area");
156
157 if (prev_rlp) {
158 if (start <= prev_rlp->rl_end)
159 panic("rlist_free: free start overlaps already freed area");
160 /*
161 * Attempt to append
162 */
163 if (prev_rlp->rl_end + 1 == start) {
164 prev_rlp->rl_end = end;
165 /*
166 * Attempt to prepend and coalesce
167 */
168 if (end + 1 == cur_rlp->rl_start) {
169 prev_rlp->rl_end = cur_rlp->rl_end;
170 prev_rlp->rl_next = cur_rlp->rl_next;
171 rlist_mfree(cur_rlp);
172 }
173 goto done;
174 }
175 }
176 /*
177 * Attempt to prepend
178 */
179 if (end + 1 == cur_rlp->rl_start) {
180 cur_rlp->rl_start = start;
181 goto done;
182 }
183 }
184 /*
185 * Reached the end of the list without finding a larger entry.
186 * Append to last entry if there is one and it's adjacent.
187 */
188 if (prev_rlp) {
189 if (start <= prev_rlp->rl_end)
190 panic("rlist_free: free start overlaps already freed area at list tail");
191 /*
192 * Attempt to append
193 */
194 if (prev_rlp->rl_end + 1 == start) {
195 prev_rlp->rl_end = end;
196 goto done;
197 }
198 }
199
200 /*
201 * Could neither append nor prepend; allocate a new entry.
202 */
203 next_rlp = cur_rlp;
204 cur_rlp = rlist_malloc();
205 cur_rlp->rl_start = start;
206 cur_rlp->rl_end = end;
207 cur_rlp->rl_next = next_rlp;
208 if (prev_rlp) {
209 prev_rlp->rl_next = cur_rlp;
210 } else {
211 /*
212 * No previous - this entry is the new list head.
213 */
214 *rlp = cur_rlp;
215 }
216
217 done:
218 rlh->rlh_lock &= ~RLH_LOCKED;
219 if (rlh->rlh_lock & RLH_DESIRED) {
220 wakeup(rlh);
221 rlh->rlh_lock &= ~RLH_DESIRED;
222 }
223 return;
224 }
225
226 /*
227 * Obtain a region of desired size from a resource list.
228 * If nothing available of that size, return 0. Otherwise,
229 * return a value of 1 and set resource start location with
230 * "*loc". (Note: loc can be zero if we don't wish the value)
231 */
232 int
233 rlist_alloc (rlh, size, loc)
234 struct rlisthdr *rlh;
235 unsigned size, *loc;
236 {
237 struct rlist **rlp = &rlh->rlh_list;
238 register struct rlist *lp;
239 int s;
240 register struct rlist *olp = 0;
241
242 s = splhigh();
243 while (rlh->rlh_lock & RLH_LOCKED) {
244 rlh->rlh_lock |= RLH_DESIRED;
245 tsleep(rlh, PSWP, "rlistf", 0);
246 }
247 rlh->rlh_lock |= RLH_LOCKED;
248 splx(s);
249
250 /* walk list, allocating first thing that's big enough (first fit) */
251 for (; *rlp; rlp = &((*rlp)->rl_next))
252 if(size <= (*rlp)->rl_end - (*rlp)->rl_start + 1) {
253
254 /* hand it to the caller */
255 if (loc) *loc = (*rlp)->rl_start;
256 (*rlp)->rl_start += size;
257
258 /* did we eat this element entirely? */
259 if ((*rlp)->rl_start > (*rlp)->rl_end) {
260 lp = (*rlp)->rl_next;
261 rlist_mfree(*rlp);
262 /*
263 * if the deleted element was in fromt
264 * of the list, adjust *rlp, else don't.
265 */
266 if (olp) {
267 olp->rl_next = lp;
268 } else {
269 *rlp = lp;
270 }
271 }
272
273 rlh->rlh_lock &= ~RLH_LOCKED;
274 if (rlh->rlh_lock & RLH_DESIRED) {
275 wakeup(rlh);
276 rlh->rlh_lock &= ~RLH_DESIRED;
277 }
278 return (1);
279 } else {
280 olp = *rlp;
281 }
282
283 rlh->rlh_lock &= ~RLH_LOCKED;
284 if (rlh->rlh_lock & RLH_DESIRED) {
285 wakeup(rlh);
286 rlh->rlh_lock &= ~RLH_DESIRED;
287 }
288 /* nothing in list that's big enough */
289 return (0);
290 }
291
292 /*
293 * Finished with this resource list, reclaim all space and
294 * mark it as being empty.
295 */
296 void
297 rlist_destroy (rlh)
298 struct rlisthdr *rlh;
299 {
300 struct rlist **rlp = &rlh->rlh_list;
301 struct rlist *lp, *nlp;
302
303 lp = *rlp;
304 *rlp = 0;
305 for (; lp; lp = nlp) {
306 nlp = lp->rl_next;
307 rlist_mfree(lp);
308 }
309 }
/* Cache object: 1619b752643bb18f004a807aadbe5128 */