/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com */
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD$
 */

#include "dev/drm/drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA		0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG		0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T	u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )	NULL
#endif
#endif

/*
 * Compute order. Can be made faster.
 */
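/* Returns the smallest order such that (1 << order) >= size, i.e.
 * ceil(log2(size)): for example, a size of 1200 gives order 11 (2048
 * bytes), while an exact power of two such as 1024 gives order 10.
 */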
int DRM(order)( unsigned long size )
{
	int order;
	unsigned long tmp;

	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	if ( size & ~(1 << order) )
		++order;

	return order;
}

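/* Ioctl to create a new mapping (registers, frame buffer, SAREA, AGP or
 * scatter/gather memory), validate it, and enter it on the device's map
 * list so userland can mmap it later.
 */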
int DRM(addmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_t request;
	drm_local_map_t *map;
	drm_map_list_entry_t *list;

	if (!(dev->flags & (FREAD|FWRITE)))
		return DRM_ERR(EACCES); /* Require read/write */

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

	map = (drm_local_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return DRM_ERR(ENOMEM);

	map->offset = request.offset;
	map->size = request.size;
	map->type = request.type;
	map->flags = request.flags;
	map->mtrr = 0;
	map->handle = 0;

	/* Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow for
	 * removal when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
	    map->offset, map->size, map->type );
	if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	if (map->offset + map->size < map->offset) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return DRM_ERR(EINVAL);
	}

	switch ( map->type ) {
	case _DRM_REGISTERS:
		DRM_IOREMAP(map, dev);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
		if (DRM(mtrr_add)(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
#endif
		break;
	case _DRM_SHM:
		map->handle = (void *)DRM(alloc)(map->size, DRM_MEM_SAREA);
		DRM_DEBUG( "%lu %d %p\n",
		    map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(ENOMEM);
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				DRM(free)(map->handle, map->size,
				    DRM_MEM_SAREA);
				DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
				return DRM_ERR(EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
#if __REALLY_HAVE_AGP
	case _DRM_AGP:
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->mtrr; /* for getmap */
		break;
#endif
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
			return DRM_ERR(EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}

	list = DRM(calloc)(1, sizeof(*list), DRM_MEM_MAPS);
	if (list == NULL) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return DRM_ERR(ENOMEM);
	}
	list->map = map;

	DRM_LOCK();
	TAILQ_INSERT_TAIL(dev->maplist, list, link);
	DRM_UNLOCK();

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = map->handle;

	if ( request.type != _DRM_SHM ) {
		request.handle = (void *)request.offset;
	}

	DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

	return 0;
}


/* Remove a map from the map list and deallocate its resources if the
 * mapping isn't in use.
 */

int DRM(rmmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_list_entry_t *list;
	drm_local_map_t *map;
	drm_map_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

	DRM_LOCK();
	TAILQ_FOREACH(list, dev->maplist, link) {
		map = list->map;
		if (map->handle == request.handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (list == NULL) {
		DRM_UNLOCK();
		return DRM_ERR(EINVAL);
	}
	TAILQ_REMOVE(dev->maplist, list, link);
	DRM_UNLOCK();

	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
		if (map->mtrr) {
			int __unused retcode;

			retcode = DRM(mtrr_del)(map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
#endif
		DRM(ioremapfree)(map);
		break;
	case _DRM_SHM:
		DRM(free)(map->handle, map->size, DRM_MEM_SAREA);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	}
	DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	return 0;
}

#if __HAVE_DMA


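/* Unwind a partially completed buffer allocation: free any per-buffer
 * private data and, for PCI DMA, any segments already allocated, then
 * reset the entry's counts so it can be reused.
 */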
static void DRM(cleanup_buf_error)(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

#if __HAVE_PCI_DMA
	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i] != 0)
				DRM(pci_free)(dev, entry->buf_size,
				    (void *)entry->seglist[i],
				    entry->seglist_bus[i]);
		}
		DRM(free)(entry->seglist,
		    entry->seg_count * sizeof(*entry->seglist),
		    DRM_MEM_SEGS);
		DRM(free)(entry->seglist_bus, entry->seg_count *
		    sizeof(*entry->seglist_bus), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}
#endif /* __HAVE_PCI_DMA */

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			DRM(free)(entry->buflist[i].dev_private,
			    entry->buflist[i].dev_priv_size, DRM_MEM_BUFS);
		}
		DRM(free)(entry->buflist,
		    entry->buf_count * sizeof(*entry->buflist),
		    DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __REALLY_HAVE_AGP
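/* Back end of DRM(addbufs) for AGP: carve request->count buffers of
 * (1 << order) bytes out of AGP space starting at request->agp_start and
 * append them to the device-wide buffer list.
 */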
static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	entry = &dma->bufs[order];

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
	    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->filp = NULL;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(dev, entry);
			return DRM_ERR(ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof(*dma->buflist),
	    DRM_MEM_BUFS );
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(dev, entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
#endif /* __REALLY_HAVE_AGP */

#if __HAVE_PCI_DMA
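/* Back end of DRM(addbufs) for PCI DMA: allocate contiguous segments with
 * DRM(pci_alloc), split each segment into one or more buffers, and extend
 * the device pagelist with the new backing pages.
 */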
static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	vm_offset_t vaddr;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
	dma_addr_t bus_addr;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order );

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = DRM(alloc)(count * sizeof(*entry->buflist),
	    DRM_MEM_BUFS);
	entry->seglist = DRM(alloc)(count * sizeof(*entry->seglist),
	    DRM_MEM_SEGS);
	entry->seglist_bus = DRM(alloc)(count * sizeof(*entry->seglist_bus),
	    DRM_MEM_SEGS);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = DRM(alloc)((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    entry->seglist_bus == NULL || temp_pagelist == NULL) {
		DRM(free)(entry->buflist, count * sizeof(*entry->buflist),
		    DRM_MEM_BUFS);
		DRM(free)(entry->seglist, count * sizeof(*entry->seglist),
		    DRM_MEM_SEGS);
		DRM(free)(entry->seglist_bus, count *
		    sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
		DRM(free)(temp_pagelist, (dma->page_count +
		    (count << page_order)) * sizeof(*dma->pagelist),
		    DRM_MEM_PAGES);
		return DRM_ERR(ENOMEM);
	}

	bzero(entry->buflist, count * sizeof(*entry->buflist));
	bzero(entry->seglist, count * sizeof(*entry->seglist));

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG( "pagelist: %d entries\n",
	    dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		vaddr = (vm_offset_t) DRM(pci_alloc)(dev, size, alignment,
		    0xfffffffful, &bus_addr);
		if (vaddr == 0) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			DRM(cleanup_buf_error)(dev, entry);
			DRM(free)(temp_pagelist, (dma->page_count +
			    (count << page_order)) * sizeof(*dma->pagelist),
			    DRM_MEM_PAGES);
			return DRM_ERR(ENOMEM);
		}

		entry->seglist_bus[entry->seg_count] = bus_addr;
		entry->seglist[entry->seg_count++] = vaddr;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
			    dma->page_count + page_count,
			    (long)vaddr + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++] =
			    vaddr + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		    offset + size <= total && entry->buf_count < count ;
		    offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(vaddr + offset);
			buf->bus_address = bus_addr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->filp = NULL;

			buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
			buf->dev_private = DRM(alloc)(sizeof(DRIVER_BUF_PRIV_T),
			    DRM_MEM_BUFS);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				DRM(cleanup_buf_error)(dev, entry);
				DRM(free)(temp_pagelist, (dma->page_count +
				    (count << page_order)) *
				    sizeof(*dma->pagelist), DRM_MEM_PAGES );
				return DRM_ERR(ENOMEM);
			}
			bzero(buf->dev_private, buf->dev_priv_size);

			DRM_DEBUG( "buffer %d @ %p\n",
			    entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = DRM(realloc)( dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof(*dma->buflist),
	    DRM_MEM_BUFS );
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(dev, entry);
		DRM(free)(temp_pagelist, (dma->page_count +
		    (count << page_order)) * sizeof(*dma->pagelist),
		    DRM_MEM_PAGES);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original
	 * pagelist with the new one.
	 */
	DRM(free)(dma->pagelist, dma->page_count * sizeof(*dma->pagelist),
	    DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
#endif /* __HAVE_PCI_DMA */

#if __REALLY_HAVE_SG
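/* Back end of DRM(addbufs) for scatter/gather: lay buffers out inside the
 * previously created dev->sg area; buffer addresses are offsets from
 * dev->sg->handle rather than physically contiguous memory.
 */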
static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	entry = &dma->bufs[order];

	entry->buflist = DRM(calloc)(1, count * sizeof(*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return DRM_ERR(ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->filp = NULL;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(dev, entry);
			return DRM_ERR(ENOMEM);
		}

		DRM_DEBUG( "buffer %d @ %p\n",
		    entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof(*dma->buflist),
	    DRM_MEM_BUFS );
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(dev, entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
#endif /* __REALLY_HAVE_SG */

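/* Ioctl entry point for buffer allocation: validate the request, then
 * dispatch to the AGP, scatter/gather or PCI back end above while holding
 * dma_lock.  A hypothetical userland caller (the ioctl number and struct
 * layout come from drm.h; this is only an illustrative sketch) might do:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;                  // number of buffers wanted
 *	req.size  = 65536;               // bytes per buffer; DRM(order)
 *	                                 // rounds this up to a power of two
 *	req.flags = _DRM_AGP_BUFFER;     // back them with AGP memory
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
 *		printf("got %d buffers of %d bytes\n", req.count, req.size);
 *
 * On success the request is copied back with the count and size actually
 * allocated.
 */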
int DRM(addbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;
	int order;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	if (request.count < 0 || request.count > 4096)
		return DRM_ERR(EINVAL);

	order = DRM(order)(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return DRM_ERR(EINVAL);

	DRM_SPINLOCK(&dev->dma_lock);
	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(ENOMEM);
	}

#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		err = DRM(addbufs_agp)(dev, &request);
	else
#endif
#if __REALLY_HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		err = DRM(addbufs_sg)(dev, &request);
	else
#endif
#if __HAVE_PCI_DMA
		err = DRM(addbufs_pci)(dev, &request);
#else
		err = DRM_ERR(EINVAL);
#endif
	DRM_SPINUNLOCK(&dev->dma_lock);

	DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

	return err;
}

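/* Ioctl to describe the allocated buffers: for each order that has
 * buffers, report the count, size and freelist watermarks.  Calling it
 * also bumps dev->buf_use, which blocks any further addbufs allocations.
 */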
int DRM(infobufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request.list[count], &from,
				    sizeof(drm_buf_desc_t)) != 0) {
					retcode = DRM_ERR(EFAULT);
					break;
				}

				DRM_DEBUG( "%d %d %d %d %d\n",
				    i,
				    dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

	return retcode;
}

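/* Ioctl to set the low and high freelist watermarks for the size class
 * (order) that request.size falls into.
 */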
int DRM(markbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	DRM_DEBUG( "%d, %d, %d\n",
	    request.size, request.low_mark, request.high_mark );

	order = DRM(order)(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request.low_mark < 0 || request.high_mark < 0) {
		return DRM_ERR(EINVAL);
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request.low_mark > dma->bufs[order].buf_count ||
	    request.high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(EINVAL);
	}

	dma->bufs[order].freelist.low_mark = request.low_mark;
	dma->bufs[order].freelist.high_mark = request.high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

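/* Ioctl to release buffers back to the free list.  Each index in
 * request.list is validated, and a buffer may only be freed by the file
 * handle that owns it.
 */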
int DRM(freebufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

	DRM_DEBUG( "%d\n", request.count );

	DRM_SPINLOCK(&dev->dma_lock);
	for ( i = 0 ; i < request.count ; i++ ) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
			retcode = DRM_ERR(EFAULT);
			break;
		}
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
			    idx, dma->buf_count - 1 );
			retcode = DRM_ERR(EINVAL);
			break;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		DRM(free_buffer)( dev, buf );
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

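/* Ioctl to map the entire pool of DMA buffers into the caller's address
 * space with a single mmap of either the AGP/SG backing map or the PCI
 * buffers, then copy out an (idx, total, used, address) record for each
 * buffer.  Nothing is mapped unless request.count is at least the number
 * of buffers allocated.
 */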
int DRM(mapbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#ifdef __FreeBSD__
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
	struct vnode *vn;
	vm_size_t size;
	vaddr_t vaddr;
#endif /* __NetBSD__ */

	drm_buf_map_t request;
	int i;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#ifdef __NetBSD__
	if (!vfinddev(kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	vms = p->td_proc->p_vmspace;
#else
	vms = p->p_vmspace;
#endif

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request.count < dma->buf_count)
		goto done;

	if ((__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP(dev);

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

#ifdef __FreeBSD__
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#elif defined(__NetBSD__)
	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
	if (retcode)
		goto done;

	request.virtual = (void *)vaddr;

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof(request.list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request.list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request.count = dma->buf_count;

	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

	return DRM_ERR(retcode);
}

#endif /* __HAVE_DMA */