sys/dev/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/dev/drm/drm_mm.c 207118 2010-04-23 14:48:30Z rnoland $");

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free-list data structure is used. Currently
 * the free list is just an unordered stack of free regions; replacing it with
 * an RB-tree would be an easy improvement, at least under heavy fragmentation.
 * Aligned allocations could also be sped up.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "dev/drm/drmP.h"
#include "dev/drm/drm_mm.h"

#define MM_UNUSED_TARGET 4

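/*
 * Example usage (editor's sketch, not part of the original file): a typical
 * driver flow through this manager.  The function names are the real API
 * defined below; the sizes and variables are illustrative only, and the
 * caller is assumed to provide the mutual exclusion drm_mm itself does not.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *free_block, *block;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage a 1 MB range
 *	free_block = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (free_block != NULL) {
 *		block = drm_mm_get_block_generic(free_block, 4096, 0, 0);
 *		...
 *		drm_mm_put_block(block);	// give the space back
 *	}
 *	drm_mm_takedown(&mm);
 */
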
/* Return the size of the free block at the tail of the managed range, or 0. */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

/* Shrink the managed range by trimming 'size' bytes off a free tail block. */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

/*
 * Allocate a node, falling back on the pre-allocated unused_nodes pool
 * (filled by drm_mm_pre_get()) if an atomic allocation fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT);
	else
		child = malloc(sizeof(*child), DRM_MEM_MM, M_WAITOK);

	if (unlikely(child == NULL)) {
		mtx_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		mtx_unlock(&mm->unused_lock);
	}
	return child;
}

/* Pre-allocate nodes so later atomic allocations have something to fall back on. */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	mtx_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		mtx_unlock(&mm->unused_lock);
		node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
		mtx_lock(&mm->unused_lock);

		/*
		 * M_WAITOK allocations do not fail on FreeBSD; this check is
		 * carried over from the Linux original, where the GFP_KERNEL
		 * kmalloc can return NULL.
		 */
		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			mtx_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	mtx_unlock(&mm->unused_lock);
	return 0;
}

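/*
 * Example (editor's sketch): drm_mm_pre_get() is meant to be called before
 * entering a context that cannot sleep, so the M_NOWAIT path in
 * drm_mm_kmalloc() can fall back on the cached nodes.  'driver_lock', 'size',
 * 'free_block' and 'block' are illustrative names, not part of this API.
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	mtx_lock(&driver_lock);			// no sleeping from here on
 *	free_block = drm_mm_search_free(&mm, size, 0, 0);
 *	if (free_block != NULL)
 *		block = drm_mm_get_block_generic(free_block, size, 0, 1);
 *	mtx_unlock(&driver_lock);
 */
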
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

/* Grow the managed range by 'size' bytes at its tail. */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

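/*
 * Example (editor's sketch): resizing the managed range at its tail, e.g.
 * when an aperture grows or shrinks.  'extra' is illustrative only.
 *
 *	drm_mm_add_space_to_tail(&mm, extra, 0);	// grow by 'extra' bytes
 *	...
 *	if (drm_mm_tail_space(&mm) > extra)
 *		drm_mm_remove_space_from_tail(&mm, extra); // trim free tail space
 */
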
/* Carve 'size' bytes off the start of a free block, returned as an allocated node. */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}

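/*
 * Example (editor's sketch): an aligned allocation from an unaligned free
 * block.  If free_block starts at 0x1100 and 4 KB is requested with 4 KB
 * alignment, drm_mm_split_at_start() first carves off the 0xf00 bytes up to
 * 0x2000; the block at 0x2000 is returned, and the carved-off remainder is
 * immediately handed back to the free list via drm_mm_put_block().
 *
 *	block = drm_mm_get_block_generic(free_block, 4096, 4096, 0);
 */
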
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add it to the free stack. (See the example sketch after this
 * function.)
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					free(next_node, DRM_MEM_MM);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			free(cur, DRM_MEM_MM);
	}
}

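/*
 * Worked example (editor's sketch) of the merge logic above: given the
 * address-ordered map [free A][used B][free C], putting B back first folds
 * B's size into A (previous-neighbour merge), then folds C's size into A as
 * well; both B's and C's nodes are recycled onto the unused_nodes cache if
 * there is room, otherwise freed.
 *
 *	drm_mm_put_block(block_B);	// [A free][B used][C free] -> one free block
 */
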
/*
 * Find a free block with room for 'size' bytes at the given alignment.
 * With best_match set, scan the whole free stack for the smallest such
 * block instead of returning the first fit.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Track the smallest block that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}

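/*
 * Example (editor's sketch): first-fit vs. best-fit lookup.  Best-fit walks
 * the entire free stack and returns the smallest block that still satisfies
 * the request, trading lookup time for less fragmentation.
 *
 *	node = drm_mm_search_free(&mm, size, alignment, 0);	// first fit
 *	node = drm_mm_search_free(&mm, size, alignment, 1);	// best fit
 */
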
/* Nonzero if nothing is allocated, i.e. only the initial node is on the list. */
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}

int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

	/* XXX This could be non-atomic but gets called from a locked path */
	return drm_mm_create_tail_node(mm, start, size, 1);
}

void drm_mm_takedown(struct drm_mm * mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	free(entry, DRM_MEM_MM);

	mtx_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	mtx_unlock(&mm->unused_lock);

	mtx_destroy(&mm->unused_lock);

	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}