FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/drm_dma.h
1 /* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 *
31 * $FreeBSD: releng/5.0/sys/dev/drm/drm_dma.h 97683 2002-05-31 23:19:50Z anholt $
32 */
33
34 #ifdef __FreeBSD__
35 #include <machine/bus.h>
36 #include <machine/resource.h>
37 #include <sys/rman.h>
38 #endif /* __FreeBSD__ */
39 #ifdef __linux__
40 #define __NO_VERSION__
41 #include <linux/interrupt.h> /* For task queue support */
42 #endif /* __linux__ */
43
44 #include "dev/drm/drmP.h"
45
46 #ifndef __HAVE_DMA_WAITQUEUE
47 #define __HAVE_DMA_WAITQUEUE 0
48 #endif
49 #ifndef __HAVE_DMA_RECLAIM
50 #define __HAVE_DMA_RECLAIM 0
51 #endif
52 #ifndef __HAVE_SHARED_IRQ
53 #define __HAVE_SHARED_IRQ 0
54 #endif
55
56 #if __HAVE_SHARED_IRQ
57 #define DRM_IRQ_TYPE SA_SHIRQ
58 #else
59 #define DRM_IRQ_TYPE 0
60 #endif
61
62 #if __HAVE_DMA
63
64 int DRM(dma_setup)( drm_device_t *dev )
65 {
66 int i;
67
68 dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
69 if ( !dev->dma )
70 return DRM_OS_ERR(ENOMEM);
71
72 memset( dev->dma, 0, sizeof(*dev->dma) );
73
74 for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
75 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
76
77 return 0;
78 }
79
/*
 * Free every DMA resource attached to the device: for each size order,
 * the DMA page segments and buffer descriptors (including any
 * driver-private state hung off each buffer), then the flat buffer and
 * page lists, and finally the drm_device_dma_t container itself.
 * Safe to call when DMA was never set up (dev->dma == NULL).
 */
void DRM(dma_takedown)(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	int i, j;

	if (!dma) return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			/* Release the page segments backing this order,
			 * then the segment-pointer array itself. */
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				DRM(free_pages)(dma->bufs[i].seglist[j],
						dma->bufs[i].page_order,
						DRM_MEM_DMA);
			}
			DRM(free)(dma->bufs[i].seglist,
				  dma->bufs[i].seg_count
				  * sizeof(*dma->bufs[0].seglist),
				  DRM_MEM_SEGS);
		}
		if(dma->bufs[i].buf_count) {
			/* Free per-buffer driver-private state before
			 * freeing the descriptor array that holds it. */
			for(j = 0; j < dma->bufs[i].buf_count; j++) {
				if(dma->bufs[i].buflist[j].dev_private) {
					DRM(free)(dma->bufs[i].buflist[j].dev_private,
						  dma->bufs[i].buflist[j].dev_priv_size,
						  DRM_MEM_BUFS);
				}
			}
			DRM(free)(dma->bufs[i].buflist,
				  dma->bufs[i].buf_count *
				  sizeof(*dma->bufs[0].buflist),
				  DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
			DRM(freelist_destroy)(&dma->bufs[i].freelist);
#endif
		}
	}

	if (dma->buflist) {
		DRM(free)(dma->buflist,
			  dma->buf_count * sizeof(*dma->buflist),
			  DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES);
	}
	/* Drop the container last and clear the pointer so a repeated
	 * takedown is a harmless no-op. */
	DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}
137
138
139 #if __HAVE_DMA_HISTOGRAM
140 /* This is slow, but is useful for debugging. */
141 int DRM(histogram_slot)(unsigned long count)
142 {
143 int value = DRM_DMA_HISTOGRAM_INITIAL;
144 int slot;
145
146 for (slot = 0;
147 slot < DRM_DMA_HISTOGRAM_SLOTS;
148 ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
149 if (count < value) return slot;
150 }
151 return DRM_DMA_HISTOGRAM_SLOTS - 1;
152 }
153
/*
 * Fold one buffer's lifecycle timestamps into the device histograms:
 * queued->dispatched, dispatched->completed, completed->freed, plus
 * the combined queued->completed and queued->freed spans.  Timestamps
 * are cleared afterwards so the buffer can be reused.  Buffers that
 * were never queued (time_queued == 0) are skipped but still cleared.
 */
void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
{
	cycles_t queued_to_dispatched;
	cycles_t dispatched_to_completed;
	cycles_t completed_to_freed;
	int q2d, d2c, c2f, q2c, q2f;

	if (buf->time_queued) {
		queued_to_dispatched	= (buf->time_dispatched
					   - buf->time_queued);
		dispatched_to_completed = (buf->time_completed
					   - buf->time_dispatched);
		completed_to_freed	= (buf->time_freed
					   - buf->time_completed);

		/* Convert each interval into a histogram bucket index. */
		q2d = DRM(histogram_slot)(queued_to_dispatched);
		d2c = DRM(histogram_slot)(dispatched_to_completed);
		c2f = DRM(histogram_slot)(completed_to_freed);

		q2c = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed);
		q2f = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed
					  + completed_to_freed);

		atomic_inc(&dev->histo.total);
		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
		atomic_inc(&dev->histo.completed_to_freed[c2f]);

		atomic_inc(&dev->histo.queued_to_completed[q2c]);
		atomic_inc(&dev->histo.queued_to_freed[q2f]);

	}
	/* Reset so a recycled buffer starts with a clean record. */
	buf->time_queued     = 0;
	buf->time_dispatched = 0;
	buf->time_completed  = 0;
	buf->time_freed	     = 0;
}
193 #endif
194
/*
 * Return a buffer to the free state: clear its ownership/usage flags
 * and wake any process sleeping on it.  If nobody was waiting and the
 * freelist feature is compiled in, the buffer is placed directly on
 * its order's freelist.
 */
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{
	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->pid      = 0;
	buf->used     = 0;
#if __HAVE_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
#endif

#ifdef __linux__
	if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	}
#endif /* __linux__ */
#ifdef __FreeBSD__
	if ( buf->dma_wait ) {
		wakeup( &buf->dma_wait );
		buf->dma_wait = 0;
	}
#endif /* __FreeBSD__ */
	/* NOTE(review): this 'else' pairs with whichever platform 'if'
	 * survives preprocessing above (the Linux waitqueue check or the
	 * FreeBSD dma_wait check) -- intentional, but fragile if another
	 * statement is ever inserted between the blocks. */
#if __HAVE_DMA_FREELIST
	else {
		drm_device_dma_t *dma = dev->dma;
				/* If processes are waiting, the last one
				   to wake will put the buffer on the free
				   list.  If no processes are waiting, we
				   put the buffer on the freelist here. */
		DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
	}
#endif
}
229
230 #if !__HAVE_DMA_RECLAIM
231 void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
232 {
233 drm_device_dma_t *dma = dev->dma;
234 int i;
235
236 if (!dma) return;
237 for (i = 0; i < dma->buf_count; i++) {
238 if (dma->buflist[i]->pid == pid) {
239 switch (dma->buflist[i]->list) {
240 case DRM_LIST_NONE:
241 DRM(free_buffer)(dev, dma->buflist[i]);
242 break;
243 case DRM_LIST_WAIT:
244 dma->buflist[i]->list = DRM_LIST_RECLAIM;
245 break;
246 default:
247 /* Buffer already on hardware. */
248 break;
249 }
250 }
251 }
252 }
253 #endif
254
255
256 /* GH: This is a big hack for now...
257 */
258 #if __HAVE_OLD_DMA
259
260 void DRM(clear_next_buffer)(drm_device_t *dev)
261 {
262 drm_device_dma_t *dma = dev->dma;
263
264 dma->next_buffer = NULL;
265 if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
266 DRM_OS_WAKEUP_INT(&dma->next_queue->flush_queue);
267 }
268 dma->next_queue = NULL;
269 }
270
/*
 * Pick the next context queue to service.  Priority: (1) "while
 * locked" work on the kernel context; (2) the last-run context, if it
 * still has work and its time slice has not expired; (3) a round-robin
 * scan for any queue with waiting buffers.  If a switch candidate is
 * found but the current context still owns its time slice, a timer is
 * armed to re-invoke the scheduler (via 'wrapper') when the slice ends
 * and -1 is returned.  Returns the chosen context, or -1 for
 * "nothing to do / not yet".
 */
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int	   i;
	int	   candidate = -1;
	int	   j	     = jiffies;	/* snapshot of the tick counter;
					 * presumably an OS-compat macro on
					 * FreeBSD -- TODO confirm */

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
				/* This only happens between the time the
				   interrupt is initialized and the time
				   the queues are initialized. */
		return -1;
	}

				/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

				/* If there are buffers on the last_context
				   queue, and we have not been executing
				   this context very long, continue to
				   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

				/* Otherwise, find a candidate: scan forward
				   from just past the last queue checked. */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	/* Wrap around and rescan from queue 0 if nothing was found. */
	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	/* A different context is ready but the current one still owns
	 * its time slice: defer the switch via a one-shot timer. */
	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
#ifdef __linux__
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
#endif /* __linux__ */
#ifdef __FreeBSD__
		int s = splclock();	/* block clock interrupts while
					 * inspecting/resetting the callout */
		if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
			callout_reset(&dev->timer,
				      dev->last_switch + DRM_TIME_SLICE - j,
				      (void (*)(void *))wrapper,
				      dev);
		}
		splx(s);
#endif /* __FreeBSD__ */
		return -1;
	}

	return candidate;
}
349
350
351 int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
352 {
353 int i;
354 drm_queue_t *q;
355 drm_buf_t *buf;
356 int idx;
357 int while_locked = 0;
358 drm_device_dma_t *dma = dev->dma;
359 #ifdef __linux__
360 DECLARE_WAITQUEUE(entry, current);
361 #endif /* __linux__ */
362 #ifdef __FreeBSD__
363 int error;
364 #endif /* __FreeBSD__ */
365
366 DRM_DEBUG("%d\n", d->send_count);
367
368 if (d->flags & _DRM_DMA_WHILE_LOCKED) {
369 int context = dev->lock.hw_lock->lock;
370
371 if (!_DRM_LOCK_IS_HELD(context)) {
372 DRM_ERROR("No lock held during \"while locked\""
373 " request\n");
374 return DRM_OS_ERR(EINVAL);
375 }
376 if (d->context != _DRM_LOCKING_CONTEXT(context)
377 && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
378 DRM_ERROR("Lock held by %d while %d makes"
379 " \"while locked\" request\n",
380 _DRM_LOCKING_CONTEXT(context),
381 d->context);
382 return DRM_OS_ERR(EINVAL);
383 }
384 q = dev->queuelist[DRM_KERNEL_CONTEXT];
385 while_locked = 1;
386 } else {
387 q = dev->queuelist[d->context];
388 }
389
390
391 atomic_inc(&q->use_count);
392 if (atomic_read(&q->block_write)) {
393 #ifdef __linux__
394 add_wait_queue(&q->write_queue, &entry);
395 atomic_inc(&q->block_count);
396 for (;;) {
397 current->state = TASK_INTERRUPTIBLE;
398 if (!atomic_read(&q->block_write)) break;
399 schedule();
400 if (signal_pending(current)) {
401 atomic_dec(&q->use_count);
402 remove_wait_queue(&q->write_queue, &entry);
403 return DRM_OS_ERR(EINTR);
404 }
405 }
406 atomic_dec(&q->block_count);
407 current->state = TASK_RUNNING;
408 remove_wait_queue(&q->write_queue, &entry);
409 #endif /* __linux__ */
410 #ifdef __FreeBSD__
411 atomic_inc(&q->block_count);
412 for (;;) {
413 if (!atomic_read(&q->block_write)) break;
414 error = tsleep(&q->block_write, PZERO|PCATCH,
415 "dmawr", 0);
416 if (error) {
417 atomic_dec(&q->use_count);
418 return error;
419 }
420 }
421 atomic_dec(&q->block_count);
422 #endif /* __FreeBSD__ */
423 }
424
425 for (i = 0; i < d->send_count; i++) {
426 idx = d->send_indices[i];
427 if (idx < 0 || idx >= dma->buf_count) {
428 atomic_dec(&q->use_count);
429 DRM_ERROR("Index %d (of %d max)\n",
430 d->send_indices[i], dma->buf_count - 1);
431 return DRM_OS_ERR(EINVAL);
432 }
433 buf = dma->buflist[ idx ];
434 if (buf->pid != DRM_OS_CURRENTPID) {
435 atomic_dec(&q->use_count);
436 DRM_ERROR("Process %d using buffer owned by %d\n",
437 DRM_OS_CURRENTPID, buf->pid);
438 return DRM_OS_ERR(EINVAL);
439 }
440 if (buf->list != DRM_LIST_NONE) {
441 atomic_dec(&q->use_count);
442 DRM_ERROR("Process %d using buffer %d on list %d\n",
443 DRM_OS_CURRENTPID, buf->idx, buf->list);
444 }
445 buf->used = d->send_sizes[i];
446 buf->while_locked = while_locked;
447 buf->context = d->context;
448 if (!buf->used) {
449 DRM_ERROR("Queueing 0 length buffer\n");
450 }
451 if (buf->pending) {
452 atomic_dec(&q->use_count);
453 DRM_ERROR("Queueing pending buffer:"
454 " buffer %d, offset %d\n",
455 d->send_indices[i], i);
456 return DRM_OS_ERR(EINVAL);
457 }
458 if (buf->waiting) {
459 atomic_dec(&q->use_count);
460 DRM_ERROR("Queueing waiting buffer:"
461 " buffer %d, offset %d\n",
462 d->send_indices[i], i);
463 return DRM_OS_ERR(EINVAL);
464 }
465 buf->waiting = 1;
466 if (atomic_read(&q->use_count) == 1
467 || atomic_read(&q->finalization)) {
468 DRM(free_buffer)(dev, buf);
469 } else {
470 DRM(waitlist_put)(&q->waitlist, buf);
471 atomic_inc(&q->total_queued);
472 }
473 }
474 atomic_dec(&q->use_count);
475
476 return 0;
477 }
478
/*
 * Grant up to (d->request_count - d->granted_count) buffers of the
 * given size order, copying each granted buffer's index and size out
 * to the user-space arrays in 'd'.  Stops early when the freelist
 * runs dry (freelist_get may block if _DRM_DMA_WAIT is set).
 * Returns 0, or DRM_OS_ERR(EFAULT) if a copyout faults.
 */
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
					 int order)
{
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
					d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		/* A buffer coming off the freelist should be idle;
		 * anything else indicates list corruption. */
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx,
				  buf->pid,
				  buf->waiting,
				  buf->pending);
		}
		buf->pid = DRM_OS_CURRENTPID;
		/* NOTE(review): if a copyout below fails, the buffer has
		 * already been claimed (pid set) but is neither counted in
		 * granted_count nor returned to the freelist -- it appears
		 * to leak until process reclaim.  Confirm intended. */
		if (DRM_OS_COPYTOUSR(&d->request_indices[i],
				     &buf->idx,
				     sizeof(buf->idx)))
			return DRM_OS_ERR(EFAULT);

		if (DRM_OS_COPYTOUSR(&d->request_sizes[i],
				     &buf->total,
				     sizeof(buf->total)))
			return DRM_OS_ERR(EFAULT);

		++d->granted_count;
	}
	return 0;
}
512
513
514 int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
515 {
516 int order;
517 int retcode = 0;
518 int tmp_order;
519
520 order = DRM(order)(dma->request_size);
521
522 dma->granted_count = 0;
523 retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
524
525 if (dma->granted_count < dma->request_count
526 && (dma->flags & _DRM_DMA_SMALLER_OK)) {
527 for (tmp_order = order - 1;
528 !retcode
529 && dma->granted_count < dma->request_count
530 && tmp_order >= DRM_MIN_ORDER;
531 --tmp_order) {
532
533 retcode = DRM(dma_get_buffers_of_order)(dev, dma,
534 tmp_order);
535 }
536 }
537
538 if (dma->granted_count < dma->request_count
539 && (dma->flags & _DRM_DMA_LARGER_OK)) {
540 for (tmp_order = order + 1;
541 !retcode
542 && dma->granted_count < dma->request_count
543 && tmp_order <= DRM_MAX_ORDER;
544 ++tmp_order) {
545
546 retcode = DRM(dma_get_buffers_of_order)(dev, dma,
547 tmp_order);
548 }
549 }
550 return 0;
551 }
552
553 #endif /* __HAVE_OLD_DMA */
554
555
556 #if __HAVE_DMA_IRQ
557
/*
 * Install the DMA interrupt handler for the device.  Records the irq
 * number under the device lock (rejecting a second install with
 * EBUSY), resets DMA/scheduler state, optionally sets up the
 * bottom-half task, then registers the platform interrupt handler
 * between the driver's PREINSTALL/POSTINSTALL hooks.
 *
 * Returns 0 on success; EINVAL for irq 0, EBUSY if already installed,
 * or the platform registration error.
 */
int DRM(irq_install)( drm_device_t *dev, int irq )
{
#ifdef __FreeBSD__
	int rid;
#endif /* __FreeBSD__ */
	int retcode;

	if ( !irq )
		return DRM_OS_ERR(EINVAL);

	/* Claim the irq slot atomically so concurrent installs race
	 * safely. */
	DRM_OS_LOCK;
	if ( dev->irq ) {
		DRM_OS_UNLOCK;
		return DRM_OS_ERR(EBUSY);
	}
	dev->irq = irq;
	DRM_OS_UNLOCK;

	DRM_DEBUG( "%s: irq=%d\n", __func__, irq );

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
#ifdef __linux__
	INIT_LIST_HEAD( &dev->tq.list );
	dev->tq.sync = 0;
	dev->tq.routine = DRM(dma_immediate_bh);
	dev->tq.data = dev;
#endif /* __linux__ */
#ifdef __FreeBSD__
	TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif /* __FreeBSD__ */
#endif

				/* Before installing handler */
	DRIVER_PREINSTALL();

				/* Install handler */
	/* NOTE(review): each platform block below opens an 'if (...) {'
	 * whose closing brace is the shared one after the error cleanup
	 * -- the braces deliberately span the #ifdef boundaries. */
#ifdef __linux__
	retcode = request_irq( dev->irq, DRM(dma_service),
			       DRM_IRQ_TYPE, dev->devname, dev );
	if ( retcode < 0 ) {
#endif /* __linux__ */
#ifdef __FreeBSD__
	rid = 0;
	/* NOTE(review): this early return leaves dev->irq set and skips
	 * DRM_OS_ERR wrapping, unlike the cleanup path below -- confirm. */
	dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
				      0, ~0, 1, RF_SHAREABLE);
	if (!dev->irqr)
		return ENOENT;

	retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
				 DRM(dma_service), dev, &dev->irqh);
	if ( retcode ) {
#endif /* __FreeBSD__ */
		/* Registration failed: release the resource and clear
		 * the recorded irq so a later install can retry. */
		DRM_OS_LOCK;
#ifdef __FreeBSD__
		bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
#endif /* __FreeBSD__ */
		dev->irq = 0;
		DRM_OS_UNLOCK;
		return retcode;
	}

				/* After installing handler */
	DRIVER_POSTINSTALL();

	return 0;
}
632
633 int DRM(irq_uninstall)( drm_device_t *dev )
634 {
635 int irq;
636
637 DRM_OS_LOCK;
638 irq = dev->irq;
639 dev->irq = 0;
640 DRM_OS_UNLOCK;
641
642 if ( !irq )
643 return DRM_OS_ERR(EINVAL);
644
645 DRM_DEBUG( "%s: irq=%d\n", __func__, irq );
646
647 DRIVER_UNINSTALL();
648
649 #ifdef __linux__
650 free_irq( irq, dev );
651 #endif /* __linux__ */
652 #ifdef __FreeBSD__
653 bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
654 bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
655 #endif /* __FreeBSD__ */
656
657 return 0;
658 }
659
660 int DRM(control)( DRM_OS_IOCTL )
661 {
662 DRM_OS_DEVICE;
663 drm_control_t ctl;
664
665 DRM_OS_KRNFROMUSR( ctl, (drm_control_t *) data, sizeof(ctl) );
666
667 switch ( ctl.func ) {
668 case DRM_INST_HANDLER:
669 return DRM(irq_install)( dev, ctl.irq );
670 case DRM_UNINST_HANDLER:
671 return DRM(irq_uninstall)( dev );
672 default:
673 return DRM_OS_ERR(EINVAL);
674 }
675 }
676
677 #endif /* __HAVE_DMA_IRQ */
678
679 #endif /* __HAVE_DMA */
Cache object: db802ecf287e17d1716b2c5ae255abc7
|