FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3 /*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/6.4/sys/dev/drm/i915_dma.c 177543 2008-03-24 10:43:41Z remko $");
31
32 #include "dev/drm/drmP.h"
33 #include "dev/drm/drm.h"
34 #include "dev/drm/i915_drm.h"
35 #include "dev/drm/i915_drv.h"
36
37 #define IS_I965G(dev) (dev->pci_device == 0x2972 || \
38 dev->pci_device == 0x2982 || \
39 dev->pci_device == 0x2992 || \
40 dev->pci_device == 0x29A2 || \
41 dev->pci_device == 0x2A12 )
42
43 /* Really want an OS-independent resettable timer. Would like to have
44 * this loop run for (eg) 3 sec, but have the timer reset every time
45 * the head pointer changes, so that EBUSY only happens if the ring
46 * actually stalls for (eg) 3 seconds.
47 */
48 int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
49 {
50 drm_i915_private_t *dev_priv = dev->dev_private;
51 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
52 u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
53 int i;
54
55 for (i = 0; i < 10000; i++) {
56 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
57 ring->space = ring->head - (ring->tail + 8);
58 if (ring->space < 0)
59 ring->space += ring->Size;
60 if (ring->space >= n)
61 return 0;
62
63 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
64
65 if (ring->head != last_head)
66 i = 0;
67
68 last_head = ring->head;
69 }
70
71 return DRM_ERR(EBUSY);
72 }
73
74 void i915_kernel_lost_context(drm_device_t * dev)
75 {
76 drm_i915_private_t *dev_priv = dev->dev_private;
77 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
78
79 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
80 ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
81 ring->space = ring->head - (ring->tail + 8);
82 if (ring->space < 0)
83 ring->space += ring->Size;
84
85 if (ring->head == ring->tail)
86 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
87 }
88
/*
 * Tear down the DMA state built by i915_initialize(): unmap the ring,
 * free the hardware status page, and release the private structure.
 * Safe to call whether or not initialization completed (all steps are
 * guarded).  Always returns 0.
 */
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}
120
/*
 * Build the driver's DMA state for the DRM_I915_INIT ioctl: allocate
 * the hardware status page, locate the SAREA and MMIO maps, map the
 * ring buffer, and program the status page address register.
 *
 * Ownership: on any failure, dev_priv is published via dev->dev_private
 * and then freed by i915_dma_cleanup(); the caller must not touch it
 * again.  On success dev->dev_private holds the initialized state.
 */
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	drm_dma_handle_t *dmah;

	/* Drop the DRM lock around the PCI allocation, which may sleep. */
	DRM_UNLOCK();
	memset(dev_priv, 0, sizeof(drm_i915_private_t));
	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
	    0xffffffff);
	DRM_LOCK();
	if (!dmah) {
		/* Publish dev_priv so i915_dma_cleanup() frees it. */
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->status_page_dmah = dmah;

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	/* Driver-private portion of the SAREA lives at a caller-supplied
	 * offset inside the shared mapping. */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* NOTE(review): tail_mask assumes ring_size is a power of two;
	 * not validated here — confirm userspace guarantees it. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* NOTE(review): 0x02080 is the status page address register,
	 * also rewritten in i915_dma_cleanup() — keep the two in sync. */
	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}
211
/*
 * Re-enable DMA after suspend: verify that the state built by
 * i915_initialize() is still intact and re-program the hardware status
 * page address register, whose contents do not survive suspend.
 * Performs no allocation; purely checks and one register write.
 */
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
246
247 static int i915_dma_init(DRM_IOCTL_ARGS)
248 {
249 DRM_DEVICE;
250 drm_i915_private_t *dev_priv;
251 drm_i915_init_t init;
252 int retcode = 0;
253
254 DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
255 sizeof(init));
256
257 switch (init.func) {
258 case I915_INIT_DMA:
259 dev_priv = drm_alloc(sizeof(drm_i915_private_t),
260 DRM_MEM_DRIVER);
261 if (dev_priv == NULL)
262 return DRM_ERR(ENOMEM);
263 retcode = i915_initialize(dev, dev_priv, &init);
264 break;
265 case I915_CLEANUP_DMA:
266 retcode = i915_dma_cleanup(dev);
267 break;
268 case I915_RESUME_DMA:
269 retcode = i915_dma_resume(dev);
270 break;
271 default:
272 retcode = -EINVAL;
273 break;
274 }
275
276 return retcode;
277 }
278
279 /* Implement basically the same security restrictions as hardware does
280 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
281 *
282 * Most of the calculations below involve calculating the size of a
283 * particular instruction. It's important to get the size right as
284 * that tells us where the next instruction to check is. Any illegal
285 * instruction detected will be given a size of zero, which is a
286 * signal to abort the rest of the buffer.
287 */
/*
 * Compute the length in dwords of a single ring command, implementing
 * basically the same restrictions the hardware applies for
 * MI_BATCH_NON_SECURE.  A return of 0 marks an illegal (or unsized)
 * instruction and signals the caller to abort the rest of the buffer.
 *
 * All bitfields are extracted from an unsigned copy of the command so
 * that right-shifting a value with the sign bit set (client codes
 * 0x4-0x7) is well-defined, rather than relying on the implementation-
 * defined arithmetic shift of a negative int.  Results are unchanged.
 */
static int do_validate_cmd(int cmd)
{
	unsigned int ucmd = (unsigned int)cmd;

	switch ((ucmd >> 29) & 0x7) {	/* client field */
	case 0x0:
		/* MI client: only no-op and flush are allowed. */
		switch ((ucmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (ucmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3D client: opcodes <= 0x18 are single-dword state. */
		if (((ucmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((ucmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((ucmd >> 16) & 0xff) {
			case 0x3:
				return (ucmd & 0x1f) + 2;
			case 0x4:
				return (ucmd & 0xf) + 2;
			default:
				return (ucmd & 0xffff) + 2;
			}
		case 0x1e:
			if (ucmd & (1 << 23))
				return (ucmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((ucmd & (1 << 23)) == 0) {	/* inline vertices */
				return (ucmd & 0x1ffff) + 2;
			} else if (ucmd & (1 << 17)) {	/* indirect random */
				if ((ucmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((ucmd & 0xffff) + 1) / 2) + 1;
			} else {
				return 2;	/* indirect sequential */
			}
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
345
/*
 * Thin wrapper around do_validate_cmd(); kept as a separate function so
 * a debug print of the computed size can be re-enabled here without
 * touching the validator itself.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
354
/*
 * Validate a userspace dword command list and copy it into the ring.
 *
 * Each command header is sized via validate_cmd(); a size of 0 or a
 * command that would run past the end of the buffer aborts with EINVAL.
 * The ring write is padded with a zero dword to keep the tail aligned
 * to an even number of dwords.
 *
 * NOTE(review): the buffer is read with the UNCHECKED copy variant but
 * the caller (i915_cmdbuffer) only VERIFYAREAs the cliprects, not
 * cmdbuf.buf — confirm the unchecked copy is safe on this platform.
 * NOTE(review): an EINVAL return after BEGIN_LP_RING leaves the
 * reserved ring space partially written — appears to rely on
 * ADVANCE_LP_RING never running in that case.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Whole (padded) list must fit in the ring minus the 8-byte gap. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) {

			return DRM_ERR(EINVAL);
		}
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		/* Copy the command's remaining sz-1 operand dwords. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	/* Pad odd-length lists to an even dword count. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
394
395 static int i915_emit_box(drm_device_t * dev,
396 drm_clip_rect_t __user * boxes,
397 int i, int DR1, int DR4)
398 {
399 drm_i915_private_t *dev_priv = dev->dev_private;
400 drm_clip_rect_t box;
401 RING_LOCALS;
402
403 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
404 return EFAULT;
405 }
406
407 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
408 DRM_ERROR("Bad box %d,%d..%d,%d\n",
409 box.x1, box.y1, box.x2, box.y2);
410 return DRM_ERR(EINVAL);
411 }
412
413 if (IS_I965G(dev)) {
414 BEGIN_LP_RING(4);
415 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
416 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
417 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
418 OUT_RING(DR4);
419 ADVANCE_LP_RING();
420 } else {
421 BEGIN_LP_RING(6);
422 OUT_RING(GFX_OP_DRAWRECT_INFO);
423 OUT_RING(DR1);
424 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
425 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
426 OUT_RING(DR4);
427 OUT_RING(0);
428 ADVANCE_LP_RING();
429 }
430
431 return 0;
432 }
433
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * Store the next software counter value into the hardware status page
 * at byte offset 20 (dword index 5, the slot read back as hw_status[5]
 * by the batchbuffer/cmdbuffer ioctls), so completed work can be
 * tracked.  Also mirrors the value into the SAREA as last_enqueue.
 */
static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	/* Wrap to 1 before the counter would go negative as an int. */
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);	/* byte offset into the status page */
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
455
/*
 * Execute a user command buffer once per cliprect (or once if there are
 * no cliprects): emit the box, then the validated commands, repeating
 * for each box, and finish with a breadcrumb.  cmd->sz must be a
 * multiple of 4 (dword-aligned).
 */
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	/* Re-read hardware head/tail before touching the ring. */
	i915_kernel_lost_context(dev);

	/* With no cliprects, still run the buffer exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb( dev );
	return 0;
}
487
/*
 * Dispatch a user batch buffer once per cliprect (or once if there are
 * none).  Depending on use_mi_batchbuffer_start, either chains into the
 * buffer with MI_BATCH_BUFFER_START or emits an explicit
 * MI_BATCH_BUFFER with start and end addresses.  The batch always runs
 * with MI_BATCH_NON_SECURE; start and length must be 8-byte aligned.
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still run the batch exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			/* End address is inclusive of the final dword. */
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb( dev );
	return 0;
}
532
/*
 * Queue an asynchronous page flip: flush the map cache, emit a
 * CMD_OP_DISPLAYBUFFER_INFO pointing at the other page, wait for the
 * plane A flip event, and store a completion counter into the status
 * page.  Toggles current_page between front (0) and back (1) and
 * mirrors the result into the SAREA for clients.
 */
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush before changing the display base address. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Display the page that is not currently showing. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* NOTE(review): unlike i915_emit_breadcrumb(), this path
	 * post-increments and has no wrap check — confirm intended. */
	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
580
581 static int i915_quiescent(drm_device_t * dev)
582 {
583 drm_i915_private_t *dev_priv = dev->dev_private;
584
585 i915_kernel_lost_context(dev);
586 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
587 }
588
/*
 * DRM_I915_FLUSH ioctl: block (holding the hardware lock) until the
 * ring has drained.
 */
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}
597
/*
 * DRM_I915_BATCHBUFFER ioctl: copy in the request, verify the cliprect
 * array is readable, dispatch the batch, and publish the hardware's
 * last-executed breadcrumb (status page dword 5) into the SAREA.
 * Rejected with EINVAL when batchbuffers have been disabled via
 * I915_SETPARAM_ALLOW_BATCHBUFFER.
 */
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	/* Pre-verify the cliprects so the unchecked copies in
	 * i915_emit_box() are covered. */
	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}
631
/*
 * DRM_I915_CMDBUFFER ioctl: copy in the request, verify the cliprect
 * array is readable, dispatch the validated command buffer, and publish
 * the hardware's last-executed breadcrumb (status page dword 5) into
 * the SAREA.
 *
 * NOTE(review): only the cliprects are VERIFYAREA'd here; cmdbuf.buf
 * itself is read with the unchecked copy in i915_emit_cmds() — confirm
 * that is safe on this platform.
 */
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
667
/*
 * If the back page (page 1) is currently displayed, flip once more so
 * the front page is showing again before the flipping client goes away.
 * NOTE(review): the return value of i915_dispatch_flip() is ignored
 * here.  Always returns 0.
 */
static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i915_dispatch_flip(dev);

	return 0;
}
678
/*
 * DRM_I915_FLIP ioctl: queue an asynchronous page flip (requires the
 * hardware lock to be held by the caller).
 */
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}
689
/*
 * DRM_I915_GETPARAM ioctl: read a single driver parameter (IRQ active,
 * batchbuffers allowed, or last completed breadcrumb) and copy it out
 * through the user-supplied int pointer in param.value.
 */
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		/* Last breadcrumb the hardware has retired. */
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
727
/*
 * DRM_I915_SETPARAM ioctl (master/root only, see i915_ioctls[]): set a
 * single driver tunable — batchbuffer start method, texture LRU
 * granularity, or whether batchbuffers are allowed at all.
 */
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
759
/*
 * Driver load hook: register the four extra statistics counters the
 * i915 maintains beyond the generic DRM set, in type slots 6-9.
 * The flags argument is unused here.
 */
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
771
/*
 * Last-close hook: release the AGP memory heap bookkeeping and then
 * tear down all DMA state (ring mapping, status page, dev_private).
 */
void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}
780
/*
 * Per-client close hook: if this client had page flipping active, flip
 * back to the front page, then release the client's AGP heap blocks.
 */
void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			i915_do_cleanup_pageflip(dev);
		}
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}
791
/* Ioctl dispatch table, indexed by DRM_I915_* ioctl number.  Entries
 * flagged DRM_MASTER|DRM_ROOT_ONLY may only be issued by the privileged
 * DRM master; all entries require authentication (DRM_AUTH). */
drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
};

/* Number of entries in i915_ioctls[]. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
811
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}
Cache object: e9e698cd3206ddd56eb5e56f72ad1886
|