FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/i915_mem.c
1 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
2 */
3 /*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/6.4/sys/dev/drm/i915_mem.c 158686 2006-05-17 07:40:12Z anholt $");
31
32 #include "dev/drm/drmP.h"
33 #include "dev/drm/drm.h"
34 #include "dev/drm/i915_drm.h"
35 #include "dev/drm/i915_drv.h"
36
37 /* This memory manager is integrated into the global/local lru
38 * mechanisms used by the clients. Specifically, it operates by
39 * setting the 'in_use' fields of the global LRU to indicate whether
40 * this region is privately allocated to a client.
41 *
42 * This does require the client to actually respect that field.
43 *
44 * Currently no effort is made to allocate 'private' memory in any
45 * clever way - the LRU information isn't used to determine which
46 * block to allocate, and the ring is drained prior to allocations --
47 * in other words allocation is expensive.
48 */
/* Mark every texture-LRU region overlapped by block 'p' as in_use (or not),
 * stamp it with a fresh age, and move it to the head of the shared LRU list
 * in the SAREA so clients observe the ownership change.
 *
 * The LRU lives in the SAREA as an array of drm_tex_region_t; list slots are
 * linked by array index (prev/next are indices, not pointers), with slot
 * 'nr' (I915_NR_TEX_REGIONS) acting as the list head sentinel.
 */
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Convert the block's byte extent into inclusive region indices. */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	/* Bump the global texture age; all touched regions get this stamp. */
	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i)
		 */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
89
90 /* Very simple allocator for agp memory, working on a static range
91 * already mapped into each client's address space.
92 */
93
/* Carve an allocation of 'size' bytes beginning at 'start' out of free
 * block 'p', and return the resulting block with its filp set to the owner.
 *
 * Up to two new blocks are created: a leading remainder (if 'start' is past
 * p->start, e.g. due to alignment) and a trailing remainder (if the request
 * does not consume the whole block).  The heap is a circular doubly-linked
 * list; note the insertion order (p->next->prev before p->next) matters.
 *
 * NOTE(review): if drm_alloc fails mid-split we 'goto out' and hand the
 * caller a block that is larger than requested rather than failing the
 * allocation -- deliberate best-effort behavior, not a bug.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* 'newblock' becomes the tail half starting at 'start';
		 * 'p' shrinks to cover only the leading gap and stays free.
		 */
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* 'newblock' is the free trailing remainder; 'p' keeps
		 * exactly the requested [start, start+size) span.
		 */
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
135
136 static struct mem_block *alloc_block(struct mem_block *heap, int size,
137 int align2, DRMFILE filp)
138 {
139 struct mem_block *p;
140 int mask = (1 << align2) - 1;
141
142 for (p = heap->next; p != heap; p = p->next) {
143 int start = (p->start + mask) & ~mask;
144 if (p->filp == NULL && start + size <= p->start + p->size)
145 return split_block(p, start, size, filp);
146 }
147
148 return NULL;
149 }
150
151 static struct mem_block *find_block(struct mem_block *heap, int start)
152 {
153 struct mem_block *p;
154
155 for (p = heap->next; p != heap; p = p->next)
156 if (p->start == start)
157 return p;
158
159 return NULL;
160 }
161
/* Release block 'p' back to the heap and coalesce it with free neighbors.
 *
 * Merging with the *next* block absorbs it into 'p'; merging with the
 * *previous* block absorbs 'p' itself, which is why 'p' must not be used
 * after the second branch frees it.  The heap head carries a non-NULL
 * sentinel filp so it can never be coalesced away.
 */
static void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		/* frees 'p' itself; sizeof(*q) == sizeof(*p) */
		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}
185
186 /* Initialize. How to check for an uninitialized heap?
187 */
188 static int init_heap(struct mem_block **heap, int start, int size)
189 {
190 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
191
192 if (!blocks)
193 return -ENOMEM;
194
195 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
196 if (!*heap) {
197 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
198 return -ENOMEM;
199 }
200
201 blocks->start = start;
202 blocks->size = size;
203 blocks->filp = NULL;
204 blocks->next = blocks->prev = *heap;
205
206 memset(*heap, 0, sizeof(**heap));
207 (*heap)->filp = (DRMFILE) - 1;
208 (*heap)->next = (*heap)->prev = blocks;
209 return 0;
210 }
211
212 /* Free all blocks associated with the releasing file.
213 */
/* Free all blocks associated with the releasing file.
 *
 * Pass 1 disowns every block held by 'filp' and clears its in_use marks in
 * the shared texture LRU.  Pass 2 coalesces adjacent free blocks in place;
 * the inner while-loop keeps absorbing successors so the outer traversal
 * never walks onto a freed node.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	/* Heap may never have been initialized for this device. */
	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
241
242 /* Shutdown.
243 */
244 void i915_mem_takedown(struct mem_block **heap)
245 {
246 struct mem_block *p;
247
248 if (!*heap)
249 return;
250
251 for (p = (*heap)->next; p != *heap;) {
252 struct mem_block *q = p;
253 p = p->next;
254 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
255 }
256
257 drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
258 *heap = NULL;
259 }
260
261 static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
262 {
263 switch (region) {
264 case I915_MEM_REGION_AGP:
265 return &dev_priv->agp_heap;
266 default:
267 return NULL;
268 }
269 }
270
271 /* IOCTL HANDLERS */
272
273 int i915_mem_alloc(DRM_IOCTL_ARGS)
274 {
275 DRM_DEVICE;
276 drm_i915_private_t *dev_priv = dev->dev_private;
277 drm_i915_mem_alloc_t alloc;
278 struct mem_block *block, **heap;
279
280 if (!dev_priv) {
281 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
282 return DRM_ERR(EINVAL);
283 }
284
285 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
286 sizeof(alloc));
287
288 heap = get_heap(dev_priv, alloc.region);
289 if (!heap || !*heap)
290 return DRM_ERR(EFAULT);
291
292 /* Make things easier on ourselves: all allocations at least
293 * 4k aligned.
294 */
295 if (alloc.alignment < 12)
296 alloc.alignment = 12;
297
298 block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
299
300 if (!block)
301 return DRM_ERR(ENOMEM);
302
303 mark_block(dev, block, 1);
304
305 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
306 DRM_ERROR("copy_to_user\n");
307 return DRM_ERR(EFAULT);
308 }
309
310 return 0;
311 }
312
313 int i915_mem_free(DRM_IOCTL_ARGS)
314 {
315 DRM_DEVICE;
316 drm_i915_private_t *dev_priv = dev->dev_private;
317 drm_i915_mem_free_t memfree;
318 struct mem_block *block, **heap;
319
320 if (!dev_priv) {
321 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
322 return DRM_ERR(EINVAL);
323 }
324
325 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
326 sizeof(memfree));
327
328 heap = get_heap(dev_priv, memfree.region);
329 if (!heap || !*heap)
330 return DRM_ERR(EFAULT);
331
332 block = find_block(*heap, memfree.region_offset);
333 if (!block)
334 return DRM_ERR(EFAULT);
335
336 if (block->filp != filp)
337 return DRM_ERR(EPERM);
338
339 mark_block(dev, block, 0);
340 free_block(block);
341 return 0;
342 }
343
344 int i915_mem_init_heap(DRM_IOCTL_ARGS)
345 {
346 DRM_DEVICE;
347 drm_i915_private_t *dev_priv = dev->dev_private;
348 drm_i915_mem_init_heap_t initheap;
349 struct mem_block **heap;
350
351 if (!dev_priv) {
352 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
353 return DRM_ERR(EINVAL);
354 }
355
356 DRM_COPY_FROM_USER_IOCTL(initheap,
357 (drm_i915_mem_init_heap_t __user *) data,
358 sizeof(initheap));
359
360 heap = get_heap(dev_priv, initheap.region);
361 if (!heap)
362 return DRM_ERR(EFAULT);
363
364 if (*heap) {
365 DRM_ERROR("heap already initialized?");
366 return DRM_ERR(EFAULT);
367 }
368
369 return init_heap(heap, initheap.start, initheap.size);
370 }
371
372 int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
373 {
374 DRM_DEVICE;
375 drm_i915_private_t *dev_priv = dev->dev_private;
376 drm_i915_mem_destroy_heap_t destroyheap;
377 struct mem_block **heap;
378
379 if ( !dev_priv ) {
380 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
381 return DRM_ERR(EINVAL);
382 }
383
384 DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
385 sizeof(destroyheap) );
386
387 heap = get_heap( dev_priv, destroyheap.region );
388 if (!heap) {
389 DRM_ERROR("get_heap failed");
390 return DRM_ERR(EFAULT);
391 }
392
393 if (!*heap) {
394 DRM_ERROR("heap not initialized?");
395 return DRM_ERR(EFAULT);
396 }
397
398 i915_mem_takedown( heap );
399 return 0;
400 }
Cache object: 632f1daa6f2e432fb14de3ee84a0d90c
|