1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3 * \file mach64_dma.c
4 * DMA support for mach64 (Rage Pro) driver
5 *
6 * \author Gareth Hughes <gareth@valinux.com>
7 * \author Frank C. Earl <fearl@airmail.net>
8 * \author Leif Delgass <ldelgass@retinalburn.net>
9 * \author Jose Fonseca <j_r_fonseca@yahoo.co.uk>
10 */
11
12 /*-
13 * Copyright 2000 Gareth Hughes
14 * Copyright 2002 Frank C. Earl
15 * Copyright 2002-2003 Leif Delgass
16 * All Rights Reserved.
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a
19 * copy of this software and associated documentation files (the "Software"),
20 * to deal in the Software without restriction, including without limitation
21 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22 * and/or sell copies of the Software, and to permit persons to whom the
23 * Software is furnished to do so, subject to the following conditions:
24 *
25 * The above copyright notice and this permission notice (including the next
26 * paragraph) shall be included in all copies or substantial portions of the
27 * Software.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
32 * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/6.4/sys/dev/drm/mach64_dma.c 153401 2005-12-14 00:52:59Z anholt $");
39
40 #include "dev/drm/drmP.h"
41 #include "dev/drm/drm.h"
42 #include "dev/drm/mach64_drm.h"
43 #include "dev/drm/mach64_drv.h"
44
45 /*******************************************************************/
46 /** \name Engine, FIFO control */
47 /*@{*/
48
49 /**
50 * Waits for free entries in the FIFO.
51 *
52 * \note Most writes to Mach64 registers are automatically routed through
53 * command FIFO which is 16 entry deep. Prior to writing to any draw engine
54 * register one has to ensure that enough FIFO entries are available by calling
55 * this function. Failure to do so may cause the engine to lock.
56 *
57 * \param dev_priv pointer to device private data structure.
58 * \param entries number of free entries in the FIFO to wait for.
59 *
 * \returns zero on success, or -EBUSY if the timeout (specified by
 * drm_mach64_private::usec_timeout) occurs.
62 */
63 int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries)
64 {
65 int slots = 0, i;
66
67 for (i = 0; i < dev_priv->usec_timeout; i++) {
68 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
69 if (slots <= (0x8000 >> entries))
70 return 0;
71 DRM_UDELAY(1);
72 }
73
74 DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots,
75 entries);
76 return DRM_ERR(EBUSY);
77 }
78
79 /**
80 * Wait for the draw engine to be idle.
81 */
82 int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)
83 {
84 int i, ret;
85
86 ret = mach64_do_wait_for_fifo(dev_priv, 16);
87 if (ret < 0)
88 return ret;
89
90 for (i = 0; i < dev_priv->usec_timeout; i++) {
91 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
92 return 0;
93 }
94 DRM_UDELAY(1);
95 }
96
97 DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
98 MACH64_READ(MACH64_GUI_STAT));
99 mach64_dump_ring_info(dev_priv);
100 return DRM_ERR(EBUSY);
101 }
102
103 /**
104 * Wait for free entries in the ring buffer.
105 *
106 * The Mach64 bus master can be configured to act as a virtual FIFO, using a
107 * circular buffer (commonly referred as "ring buffer" in other drivers) with
108 * pointers to engine commands. This allows the CPU to do other things while
109 * the graphics engine is busy, i.e., DMA mode.
110 *
111 * This function should be called before writing new entries to the ring
112 * buffer.
113 *
114 * \param dev_priv pointer to device private data structure.
115 * \param n number of free entries in the ring buffer to wait for.
116 *
 * \returns zero on success, or -EBUSY if the timeout (specified by
 * drm_mach64_private_t::usec_timeout) occurs.
119 *
120 * \sa mach64_dump_ring_info()
121 */
122 int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n)
123 {
124 drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
125 int i;
126
127 for (i = 0; i < dev_priv->usec_timeout; i++) {
128 mach64_update_ring_snapshot(dev_priv);
129 if (ring->space >= n) {
130 if (i > 0) {
131 DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i);
132 }
133 return 0;
134 }
135 DRM_UDELAY(1);
136 }
137
138 /* FIXME: This is being ignored... */
139 DRM_ERROR("failed!\n");
140 mach64_dump_ring_info(dev_priv);
141 return DRM_ERR(EBUSY);
142 }
143
144 /**
145 * Wait until all DMA requests have been processed...
146 *
147 * \sa mach64_wait_ring()
148 */
149 static int mach64_ring_idle(drm_mach64_private_t * dev_priv)
150 {
151 drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
152 u32 head;
153 int i;
154
155 head = ring->head;
156 i = 0;
157 while (i < dev_priv->usec_timeout) {
158 mach64_update_ring_snapshot(dev_priv);
159 if (ring->head == ring->tail &&
160 !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
161 if (i > 0) {
162 DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i);
163 }
164 return 0;
165 }
166 if (ring->head == head) {
167 ++i;
168 } else {
169 head = ring->head;
170 i = 0;
171 }
172 DRM_UDELAY(1);
173 }
174
175 DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
176 MACH64_READ(MACH64_GUI_STAT));
177 mach64_dump_ring_info(dev_priv);
178 return DRM_ERR(EBUSY);
179 }
180
181 /**
 * Reset the ring buffer descriptors.
183 *
184 * \sa mach64_do_engine_reset()
185 */
static void mach64_ring_reset(drm_mach64_private_t * dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;

	/* Return any buffers still on the pending list before the ring
	 * pointers are reset, so nothing refers to stale descriptors.
	 */
	mach64_do_release_used_buffers(dev_priv);
	/* Empty ring: head == tail == 0, all of the ring is free space. */
	ring->head_addr = ring->start_addr;
	ring->head = ring->tail = 0;
	ring->space = ring->size;

	/* Re-point the hardware at the start of the descriptor table.
	 * The 16KB size flag matches ring->size (0x4000) set at init time.
	 */
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	dev_priv->ring_running = 0;
}
200
201 /**
 * Ensure that all the queued commands will be processed.
203 */
int mach64_do_dma_flush(drm_mach64_private_t * dev_priv)
{
	/* FIXME: It's not necessary to wait for idle when flushing
	 * we just need to ensure the ring will be completely processed
	 * in finite time without another ioctl
	 */
	/* Currently implemented by waiting for the whole ring to drain
	 * and the engine to go idle (see mach64_ring_idle).
	 */
	return mach64_ring_idle(dev_priv);
}
212
213 /**
214 * Stop all DMA activity.
215 */
216 int mach64_do_dma_idle(drm_mach64_private_t * dev_priv)
217 {
218 int ret;
219
220 /* wait for completion */
221 if ((ret = mach64_ring_idle(dev_priv)) < 0) {
222 DRM_ERROR("%s failed BM_GUI_TABLE=0x%08x tail: %u\n",
223 __FUNCTION__, MACH64_READ(MACH64_BM_GUI_TABLE),
224 dev_priv->ring.tail);
225 return ret;
226 }
227
228 mach64_ring_stop(dev_priv);
229
230 /* clean up after pass */
231 mach64_do_release_used_buffers(dev_priv);
232 return 0;
233 }
234
235 /**
236 * Reset the engine. This will stop the DMA if it is running.
237 */
int mach64_do_engine_reset(drm_mach64_private_t * dev_priv)
{
	u32 tmp;

	DRM_DEBUG("%s\n", __FUNCTION__);

	/* Kill off any outstanding DMA transfers.
	 */
	tmp = MACH64_READ(MACH64_BUS_CNTL);
	MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);

	/* Reset the GUI engine (high to low transition).
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
	/* Enable the GUI engine
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);

	/* ensure engine is not locked up by clearing any FIFO or HOST errors
	 */
	/* NOTE(review): 0x00a00000 looks like write-1-to-clear error bits in
	 * BUS_CNTL -- magic constant, confirm against the register docs.
	 */
	tmp = MACH64_READ(MACH64_BUS_CNTL);
	MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);

	/* Once GUI engine is restored, disable bus mastering */
	MACH64_WRITE(MACH64_SRC_CNTL, 0);

	/* Reset descriptor ring */
	mach64_ring_reset(dev_priv);

	return 0;
}
271
272 /*@}*/
273
274
275 /*******************************************************************/
276 /** \name Debugging output */
277 /*@{*/
278
279 /**
280 * Dump engine registers values.
281 */
void mach64_dump_engine_info(drm_mach64_private_t * dev_priv)
{
	DRM_INFO("\n");
	/* AGP registers are only meaningful on non-PCI (AGP) cards. */
	if (!dev_priv->is_pci) {
		DRM_INFO(" AGP_BASE = 0x%08x\n",
			 MACH64_READ(MACH64_AGP_BASE));
		DRM_INFO(" AGP_CNTL = 0x%08x\n",
			 MACH64_READ(MACH64_AGP_CNTL));
	}
	DRM_INFO(" ALPHA_TST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_ALPHA_TST_CNTL));
	DRM_INFO("\n");
	/* Bus-master (DMA) state registers. */
	DRM_INFO(" BM_COMMAND = 0x%08x\n",
		 MACH64_READ(MACH64_BM_COMMAND));
	DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
		 MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
	DRM_INFO(" BM_GUI_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_GUI_TABLE));
	DRM_INFO(" BM_STATUS = 0x%08x\n",
		 MACH64_READ(MACH64_BM_STATUS));
	DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
	DRM_INFO(" BM_SYSTEM_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_TABLE));
	DRM_INFO(" BUS_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_BUS_CNTL));
	DRM_INFO("\n");
	/* DRM_INFO( " CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
	DRM_INFO(" CLR_CMP_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_CLR_CMP_CLR));
	DRM_INFO(" CLR_CMP_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CLR_CMP_CNTL));
	/* DRM_INFO( " CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
	DRM_INFO(" CONFIG_CHIP_ID = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_CHIP_ID));
	DRM_INFO(" CONFIG_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_CNTL));
	DRM_INFO(" CONFIG_STAT0 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT0));
	DRM_INFO(" CONFIG_STAT1 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT1));
	DRM_INFO(" CONFIG_STAT2 = 0x%08x\n",
		 MACH64_READ(MACH64_CONFIG_STAT2));
	DRM_INFO(" CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
	DRM_INFO(" CUSTOM_MACRO_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
	DRM_INFO("\n");
	/* DRM_INFO( " DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
	/* DRM_INFO( " DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
	DRM_INFO(" DP_BKGD_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_DP_BKGD_CLR));
	DRM_INFO(" DP_FRGD_CLR = 0x%08x\n",
		 MACH64_READ(MACH64_DP_FRGD_CLR));
	DRM_INFO(" DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
	DRM_INFO(" DP_PIX_WIDTH = 0x%08x\n",
		 MACH64_READ(MACH64_DP_PIX_WIDTH));
	DRM_INFO(" DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
	DRM_INFO(" DP_WRITE_MASK = 0x%08x\n",
		 MACH64_READ(MACH64_DP_WRITE_MASK));
	DRM_INFO(" DSP_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_DSP_CONFIG));
	DRM_INFO(" DSP_ON_OFF = 0x%08x\n",
		 MACH64_READ(MACH64_DSP_ON_OFF));
	DRM_INFO(" DST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_DST_CNTL));
	DRM_INFO(" DST_OFF_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_DST_OFF_PITCH));
	DRM_INFO("\n");
	/* DRM_INFO( " EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
	DRM_INFO(" EXT_MEM_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_EXT_MEM_CNTL));
	DRM_INFO("\n");
	DRM_INFO(" FIFO_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_FIFO_STAT));
	DRM_INFO("\n");
	DRM_INFO(" GEN_TEST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GEN_TEST_CNTL));
	/* DRM_INFO( " GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
	DRM_INFO(" GUI_CMDFIFO_DATA = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
	DRM_INFO(" GUI_CMDFIFO_DEBUG = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
	DRM_INFO(" GUI_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_CNTL));
	DRM_INFO(" GUI_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_STAT));
	DRM_INFO(" GUI_TRAJ_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_TRAJ_CNTL));
	DRM_INFO("\n");
	DRM_INFO(" HOST_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_HOST_CNTL));
	DRM_INFO(" HW_DEBUG = 0x%08x\n",
		 MACH64_READ(MACH64_HW_DEBUG));
	DRM_INFO("\n");
	DRM_INFO(" MEM_ADDR_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_MEM_ADDR_CONFIG));
	DRM_INFO(" MEM_BUF_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_MEM_BUF_CNTL));
	DRM_INFO("\n");
	DRM_INFO(" PAT_REG0 = 0x%08x\n",
		 MACH64_READ(MACH64_PAT_REG0));
	DRM_INFO(" PAT_REG1 = 0x%08x\n",
		 MACH64_READ(MACH64_PAT_REG1));
	DRM_INFO("\n");
	/* Scissor registers. */
	DRM_INFO(" SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
	DRM_INFO(" SC_RIGHT = 0x%08x\n",
		 MACH64_READ(MACH64_SC_RIGHT));
	DRM_INFO(" SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
	DRM_INFO(" SC_BOTTOM = 0x%08x\n",
		 MACH64_READ(MACH64_SC_BOTTOM));
	DRM_INFO("\n");
	DRM_INFO(" SCALE_3D_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SCALE_3D_CNTL));
	DRM_INFO(" SCRATCH_REG0 = 0x%08x\n",
		 MACH64_READ(MACH64_SCRATCH_REG0));
	DRM_INFO(" SCRATCH_REG1 = 0x%08x\n",
		 MACH64_READ(MACH64_SCRATCH_REG1));
	DRM_INFO(" SETUP_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SETUP_CNTL));
	DRM_INFO(" SRC_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SRC_CNTL));
	DRM_INFO("\n");
	DRM_INFO(" TEX_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_TEX_CNTL));
	DRM_INFO(" TEX_SIZE_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_TEX_SIZE_PITCH));
	DRM_INFO(" TIMER_CONFIG = 0x%08x\n",
		 MACH64_READ(MACH64_TIMER_CONFIG));
	DRM_INFO("\n");
	DRM_INFO(" Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
	DRM_INFO(" Z_OFF_PITCH = 0x%08x\n",
		 MACH64_READ(MACH64_Z_OFF_PITCH));
	DRM_INFO("\n");
}
416
417 #define MACH64_DUMP_CONTEXT 3
418
419 /**
420 * Used by mach64_dump_ring_info() to dump the contents of the current buffer
421 * pointed by the ring head.
422 */
static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv,
				 drm_buf_t * buf)
{
	u32 addr = GETBUFADDR(buf);	/* bus address of the first dword */
	u32 used = buf->used >> 2;	/* remaining buffer length in dwords */
	u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
	u32 *p = GETBUFPTR(buf);
	int skipped = 0;

	DRM_INFO("buffer contents:\n");

	while (used) {
		u32 reg, count;

		/* Command dword: low 16 bits select the target register
		 * (translated by MMSELECT below), high 16 bits hold
		 * (count - 1), where count data dwords follow.
		 */
		reg = le32_to_cpu(*p++);
		/* Print only dwords near the buffer start/end, or within
		 * MACH64_DUMP_CONTEXT dwords of the engine's current read
		 * position (sys_addr); elide everything else.
		 */
		if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
		    (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
		     addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
		    addr >=
		    GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
			DRM_INFO("%08x: 0x%08x\n", addr, reg);
		}
		addr += 4;
		used--;

		count = (reg >> 16) + 1;
		reg = reg & 0xffff;
		reg = MMSELECT(reg);	/* convert to MMIO register offset */
		while (count && used) {
			if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
			    (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
			     addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
			    addr >=
			    GETBUFADDR(buf) + buf->used -
			    MACH64_DUMP_CONTEXT * 4) {
				DRM_INFO("%08x: 0x%04x = 0x%08x\n", addr,
					 reg, le32_to_cpu(*p));
				skipped = 0;
			} else {
				if (!skipped) {
					DRM_INFO(" ...\n");
					skipped = 1;
				}
			}
			p++;
			addr += 4;
			used--;

			/* consecutive data dwords go to consecutive regs */
			reg += 4;
			count--;
		}
	}

	DRM_INFO("\n");
}
478
479 /**
480 * Dump the ring state and contents, including the contents of the buffer being
481 * processed by the graphics engine.
482 */
void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	int i, skipped;

	DRM_INFO("\n");

	DRM_INFO("ring contents:\n");
	DRM_INFO(" head_addr: 0x%08x head: %u tail: %u\n\n",
		 ring->head_addr, ring->head, ring->tail);

	skipped = 0;
	/* Walk the ring four dwords (one descriptor) at a time, printing
	 * only descriptors near the ring start/end or within
	 * MACH64_DUMP_CONTEXT descriptors of the head and tail; elide the
	 * rest with "...".
	 */
	for (i = 0; i < ring->size / sizeof(u32); i += 4) {
		if (i <= MACH64_DUMP_CONTEXT * 4 ||
		    i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
		    (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
		     i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
		    (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
		     i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
			DRM_INFO(" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
				 (u32)(ring->start_addr + i * sizeof(u32)),
				 le32_to_cpu(((u32 *) ring->start)[i + 0]),
				 le32_to_cpu(((u32 *) ring->start)[i + 1]),
				 le32_to_cpu(((u32 *) ring->start)[i + 2]),
				 le32_to_cpu(((u32 *) ring->start)[i + 3]),
				 i == ring->head ? " (head)" : "",
				 i == ring->tail ? " (tail)" : "");
			skipped = 0;
		} else {
			if (!skipped) {
				DRM_INFO(" ...\n");
				skipped = 1;
			}
		}
	}

	DRM_INFO("\n");

	/* If the head index is in range, locate the pending buffer that
	 * contains the system-memory address stored in the head descriptor
	 * (entry index head + 1) and dump its contents too.
	 */
	if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
		struct list_head *ptr;
		u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);

		list_for_each(ptr, &dev_priv->pending) {
			drm_mach64_freelist_t *entry =
			    list_entry(ptr, drm_mach64_freelist_t, list);
			drm_buf_t *buf = entry->buf;

			u32 buf_addr = GETBUFADDR(buf);

			if (buf_addr <= addr && addr < buf_addr + buf->used) {
				mach64_dump_buf_info(dev_priv, buf);
			}
		}
	}

	/* Finally dump the bus-master registers relevant to DMA state. */
	DRM_INFO("\n");
	DRM_INFO(" BM_GUI_TABLE = 0x%08x\n",
		 MACH64_READ(MACH64_BM_GUI_TABLE));
	DRM_INFO("\n");
	DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
		 MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
	DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
		 MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
	DRM_INFO(" BM_COMMAND = 0x%08x\n",
		 MACH64_READ(MACH64_BM_COMMAND));
	DRM_INFO("\n");
	DRM_INFO(" BM_STATUS = 0x%08x\n",
		 MACH64_READ(MACH64_BM_STATUS));
	DRM_INFO(" BUS_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_BUS_CNTL));
	DRM_INFO(" FIFO_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_FIFO_STAT));
	DRM_INFO(" GUI_STAT = 0x%08x\n",
		 MACH64_READ(MACH64_GUI_STAT));
	DRM_INFO(" SRC_CNTL = 0x%08x\n",
		 MACH64_READ(MACH64_SRC_CNTL));
}
560
561 /*@}*/
562
563
564 /*******************************************************************/
565 /** \name DMA test and initialization */
566 /*@{*/
567
568 /**
569 * Perform a simple DMA operation using the pattern registers to test whether
570 * DMA works.
571 *
572 * \return zero if successful.
573 *
574 * \note This function was the testbed for many experiences regarding Mach64
575 * DMA operation. It is left here since it so tricky to get DMA operating
576 * properly in some architectures and hardware.
577 */
static int mach64_bm_dma_test(drm_device_t * dev)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_dma_handle_t *cpu_addr_dmah;
	u32 data_addr;
	u32 *table, *data;
	u32 expected[2];
	u32 src_cntl, pat_reg0, pat_reg1;
	int i, count, failed;

	DRM_DEBUG("%s\n", __FUNCTION__);

	table = (u32 *) dev_priv->ring.start;

	/* FIXME: get a dma buffer from the freelist here */
	DRM_DEBUG("Allocating data memory ...\n");
	cpu_addr_dmah =
	    drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
	if (!cpu_addr_dmah) {
		DRM_INFO("data-memory allocation failed!\n");
		return DRM_ERR(ENOMEM);
	} else {
		data = (u32 *) cpu_addr_dmah->vaddr;
		data_addr = (u32) cpu_addr_dmah->busaddr;
	}

	/* Save the X server's value for SRC_CNTL and restore it
	 * in case our test fails. This prevents the X server
	 * from disabling its cache for this register
	 */
	src_cntl = MACH64_READ(MACH64_SRC_CNTL);
	pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
	pat_reg1 = MACH64_READ(MACH64_PAT_REG1);

	mach64_do_wait_for_fifo(dev_priv, 3);

	/* Prime the pattern registers with a known value via MMIO so the
	 * DMA pass below can be verified to have overwritten them.
	 */
	MACH64_WRITE(MACH64_SRC_CNTL, 0);
	MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
	MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);

	mach64_do_wait_for_idle(dev_priv);

	/* Verify the MMIO writes landed before trusting the DMA result. */
	for (i = 0; i < 2; i++) {
		u32 reg;
		reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
		DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
		if (reg != 0x11111111) {
			DRM_INFO("Error initializing test registers\n");
			DRM_INFO("resetting engine ...\n");
			mach64_do_engine_reset(dev_priv);
			DRM_INFO("freeing data buffer memory.\n");
			drm_pci_free(dev, cpu_addr_dmah);
			return DRM_ERR(EIO);
		}
	}

	/* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
	count = 0;

	/* Each command dword is DMAREG(reg) | (count - 1) << 16.  The test
	 * values 0x22222222 / 0xaaaaaaaa are endian-safe (all bytes equal),
	 * so they need no cpu_to_le32 conversion.
	 */
	data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
	data[count++] = expected[0] = 0x22222222;
	data[count++] = expected[1] = 0xaaaaaaaa;

	while (count < 1020) {
		data[count++] =
		    cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
		data[count++] = 0x22222222;
		data[count++] = 0xaaaaaaaa;
	}
	/* Finish the pass by clearing SRC_CNTL through DMA itself. */
	data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
	data[count++] = 0;

	DRM_DEBUG("Preparing table ...\n");
	/* Build a single descriptor (one table entry, marked EOL) at the
	 * start of the ring.
	 */
	table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
							 MACH64_APERTURE_OFFSET);
	table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
	table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
						| MACH64_DMA_HOLD_OFFSET
						| MACH64_DMA_EOL);
	table[MACH64_DMA_RESERVED] = 0;

	DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
	DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
	DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
	DRM_DEBUG("table[3] = 0x%08x\n", table[3]);

	for (i = 0; i < 6; i++) {
		DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
	}
	DRM_DEBUG(" ...\n");
	for (i = count - 5; i < count; i++) {
		DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
	}

	/* Ensure the table/data stores are visible before starting DMA. */
	DRM_MEMORYBARRIER();

	DRM_DEBUG("waiting for idle...\n");
	if ((i = mach64_do_wait_for_idle(dev_priv))) {
		/* On failure, restore the saved register state before
		 * bailing so the X server's view stays consistent.
		 */
		DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
		DRM_INFO("resetting engine ...\n");
		mach64_do_engine_reset(dev_priv);
		mach64_do_wait_for_fifo(dev_priv, 3);
		MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
		MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
		MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
		DRM_INFO("freeing data buffer memory.\n");
		drm_pci_free(dev, cpu_addr_dmah);
		return i;
	}
	DRM_DEBUG("waiting for idle...done\n");

	DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
	DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
	DRM_DEBUG("\n");
	DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
	DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);

	/* Point the hardware at the descriptor table and enable GUI
	 * bus-mastering in system-to-register mode.
	 */
	DRM_DEBUG("starting DMA transfer...\n");
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	MACH64_WRITE(MACH64_SRC_CNTL,
		     MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
		     MACH64_SRC_BM_OP_SYSTEM_TO_REG);

	/* Kick off the transfer */
	DRM_DEBUG("starting DMA transfer... done.\n");
	MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);

	DRM_DEBUG("waiting for idle...\n");

	if ((i = mach64_do_wait_for_idle(dev_priv))) {
		/* engine locked up, dump register state and reset */
		DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
		mach64_dump_engine_info(dev_priv);
		DRM_INFO("resetting engine ...\n");
		mach64_do_engine_reset(dev_priv);
		mach64_do_wait_for_fifo(dev_priv, 3);
		MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
		MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
		MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
		DRM_INFO("freeing data buffer memory.\n");
		drm_pci_free(dev, cpu_addr_dmah);
		return i;
	}

	DRM_DEBUG("waiting for idle...done\n");

	/* restore SRC_CNTL */
	mach64_do_wait_for_fifo(dev_priv, 1);
	MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);

	failed = 0;

	/* Check register values to see if the GUI master operation succeeded */
	for (i = 0; i < 2; i++) {
		u32 reg;
		reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
		DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
		if (reg != expected[i]) {
			failed = -1;
		}
	}

	/* restore pattern registers */
	mach64_do_wait_for_fifo(dev_priv, 2);
	MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
	MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);

	DRM_DEBUG("freeing data buffer memory.\n");
	drm_pci_free(dev, cpu_addr_dmah);
	DRM_DEBUG("returning ...\n");

	return failed;
}
753
754 /**
755 * Called during the DMA initialization ioctl to initialize all the necessary
756 * software and hardware state for DMA operation.
757 */
static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
{
	drm_mach64_private_t *dev_priv;
	u32 tmp;
	int i, ret;

	DRM_DEBUG("%s\n", __FUNCTION__);

	dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return DRM_ERR(ENOMEM);

	memset(dev_priv, 0, sizeof(drm_mach64_private_t));

	dev_priv->is_pci = init->is_pci;

	/* Copy the framebuffer layout from the userland init request. */
	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Precompute packed pitch/offset words: pitch / 8 in the high bits
	 * (<< 22), offset in units of 8 bytes (>> 3) in the low bits.
	 */
	dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
					(dev_priv->front_offset >> 3));
	dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
				       (dev_priv->back_offset >> 3));
	dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
					(dev_priv->depth_offset >> 3));

	/* one second timeout for engine/ring waits */
	dev_priv->usec_timeout = 1000000;

	/* Set up the freelist, placeholder list and pending list */
	INIT_LIST_HEAD(&dev_priv->free_list);
	INIT_LIST_HEAD(&dev_priv->placeholders);
	INIT_LIST_HEAD(&dev_priv->pending);

	DRM_GETSAREA();

	/* On each lookup failure below, dev_priv is published via
	 * dev->dev_private first so mach64_do_cleanup_dma() can tear down
	 * whatever has been set up so far.
	 */
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return DRM_ERR(EINVAL);
	}
	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
	if (!dev_priv->fb) {
		DRM_ERROR("can not find frame buffer map!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return DRM_ERR(EINVAL);
	}
	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("can not find mmio map!\n");
		dev->dev_private = (void *)dev_priv;
		mach64_do_cleanup_dma(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_mach64_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	/* AGP cards take the ring, DMA buffers and textures from AGP maps;
	 * PCI cards allocate the ring from the PCI pool further below.
	 */
	if (!dev_priv->is_pci) {
		dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
		if (!dev_priv->ring_map) {
			DRM_ERROR("can not find ring map!\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return DRM_ERR(EINVAL);
		}
		drm_core_ioremap(dev_priv->ring_map, dev);
		if (!dev_priv->ring_map->handle) {
			DRM_ERROR("can not ioremap virtual address for"
				  " descriptor ring\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return DRM_ERR(ENOMEM);
		}
		dev->agp_buffer_map =
		    drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("can not find dma buffer map!\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return DRM_ERR(EINVAL);
		}
		/* there might be a nicer way to do this -
		   dev isn't passed all the way though the mach64 - DA */
		dev_priv->dev_buffers = dev->agp_buffer_map;

		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->handle) {
			DRM_ERROR("can not ioremap virtual address for"
				  " dma buffer\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return DRM_ERR(ENOMEM);
		}
		dev_priv->agp_textures =
		    drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("can not find agp texture region!\n");
			dev->dev_private = (void *)dev_priv;
			mach64_do_cleanup_dma(dev);
			return DRM_ERR(EINVAL);
		}
	}

	dev->dev_private = (void *)dev_priv;

	dev_priv->driver_mode = init->dma_mode;

	/* changing the FIFO size from the default causes problems with DMA */
	tmp = MACH64_READ(MACH64_GUI_CNTL);
	if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
		DRM_INFO("Setting FIFO size to 128 entries\n");
		/* FIFO must be empty to change the FIFO depth */
		if ((ret = mach64_do_wait_for_idle(dev_priv))) {
			DRM_ERROR
			    ("wait for idle failed before changing FIFO depth!\n");
			mach64_do_cleanup_dma(dev);
			return ret;
		}
		MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
					       | MACH64_CMDFIFO_SIZE_128));
		/* need to read GUI_STAT for proper sync according to docs */
		if ((ret = mach64_do_wait_for_idle(dev_priv))) {
			DRM_ERROR
			    ("wait for idle failed when changing FIFO depth!\n");
			mach64_do_cleanup_dma(dev);
			return ret;
		}
	}

	/* allocate descriptor memory from pci pool */
	DRM_DEBUG("Allocating dma descriptor ring\n");
	/* Must stay in sync with MACH64_CIRCULAR_BUF_SIZE_16KB used below. */
	dev_priv->ring.size = 0x4000;	/* 16KB */

	if (dev_priv->is_pci) {
		dev_priv->ring.dmah = drm_pci_alloc(dev, dev_priv->ring.size,
						    dev_priv->ring.size,
						    0xfffffffful);

		if (!dev_priv->ring.dmah) {
			DRM_ERROR("Allocating dma descriptor ring failed\n");
			return DRM_ERR(ENOMEM);
		} else {
			dev_priv->ring.start = dev_priv->ring.dmah->vaddr;
			dev_priv->ring.start_addr =
			    (u32) dev_priv->ring.dmah->busaddr;
		}
	} else {
		dev_priv->ring.start = dev_priv->ring_map->handle;
		dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
	}

	memset(dev_priv->ring.start, 0, dev_priv->ring.size);
	DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
		 dev_priv->ring.start, dev_priv->ring.start_addr);

	ret = 0;
	if (dev_priv->driver_mode != MACH64_MODE_MMIO) {

		/* enable block 1 registers and bus mastering */
		MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
						| MACH64_BUS_EXT_REG_EN)
					       & ~MACH64_BUS_MASTER_DIS));

		/* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
		DRM_DEBUG("Starting DMA test...\n");
		if ((ret = mach64_bm_dma_test(dev))) {
			dev_priv->driver_mode = MACH64_MODE_MMIO;
		}
	}

	/* Report the mode the driver ended up in. */
	switch (dev_priv->driver_mode) {
	case MACH64_MODE_MMIO:
		MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
					       | MACH64_BUS_EXT_REG_EN
					       | MACH64_BUS_MASTER_DIS));
		if (init->dma_mode == MACH64_MODE_MMIO)
			DRM_INFO("Forcing pseudo-DMA mode\n");
		else
			DRM_INFO
			    ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
			     ret);
		break;
	case MACH64_MODE_DMA_SYNC:
		DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
		break;
	case MACH64_MODE_DMA_ASYNC:
	default:
		DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
	}

	dev_priv->ring_running = 0;

	/* setup offsets for physical address of table start and end */
	dev_priv->ring.head_addr = dev_priv->ring.start_addr;
	dev_priv->ring.head = dev_priv->ring.tail = 0;
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
	dev_priv->ring.space = dev_priv->ring.size;

	/* setup physical address and size of descriptor table */
	mach64_do_wait_for_fifo(dev_priv, 1);
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     (dev_priv->ring.
		      head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));

	/* init frame counter */
	dev_priv->sarea_priv->frames_queued = 0;
	for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
		dev_priv->frame_ofs[i] = ~0;	/* All ones indicates placeholder */
	}

	/* Allocate the DMA buffer freelist */
	if ((ret = mach64_init_freelist(dev))) {
		DRM_ERROR("Freelist allocation failed\n");
		mach64_do_cleanup_dma(dev);
		return ret;
	}

	return 0;
}
986
987 /*******************************************************************/
988 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
989 */
990
991 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)
992 {
993 drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
994 volatile u32 *ring_read;
995 struct list_head *ptr;
996 drm_mach64_freelist_t *entry;
997 drm_buf_t *buf = NULL;
998 u32 *buf_ptr;
999 u32 used, reg, target;
1000 int fifo, count, found, ret, no_idle_wait;
1001
1002 fifo = count = reg = no_idle_wait = 0;
1003 target = MACH64_BM_ADDR;
1004
1005 if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1006 DRM_INFO
1007 ("%s: idle failed before pseudo-dma dispatch, resetting engine\n",
1008 __FUNCTION__);
1009 mach64_dump_engine_info(dev_priv);
1010 mach64_do_engine_reset(dev_priv);
1011 return ret;
1012 }
1013
1014 ring_read = (u32 *) ring->start;
1015
1016 while (ring->tail != ring->head) {
1017 u32 buf_addr, new_target, offset;
1018 u32 bytes, remaining, head, eol;
1019
1020 head = ring->head;
1021
1022 new_target =
1023 le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1024 buf_addr = le32_to_cpu(ring_read[head++]);
1025 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1026 bytes = le32_to_cpu(ring_read[head++])
1027 & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1028 head++;
1029 head &= ring->tail_mask;
1030
1031 /* can't wait for idle between a blit setup descriptor
1032 * and a HOSTDATA descriptor or the engine will lock
1033 */
1034 if (new_target == MACH64_BM_HOSTDATA
1035 && target == MACH64_BM_ADDR)
1036 no_idle_wait = 1;
1037
1038 target = new_target;
1039
1040 found = 0;
1041 offset = 0;
1042 list_for_each(ptr, &dev_priv->pending) {
1043 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1044 buf = entry->buf;
1045 offset = buf_addr - GETBUFADDR(buf);
1046 if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
1047 found = 1;
1048 break;
1049 }
1050 }
1051
1052 if (!found || buf == NULL) {
1053 DRM_ERROR
1054 ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1055 head, ring->tail, buf_addr, (eol ? "eol" : ""));
1056 mach64_dump_ring_info(dev_priv);
1057 mach64_do_engine_reset(dev_priv);
1058 return DRM_ERR(EINVAL);
1059 }
1060
1061 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1062 * every 16 writes
1063 */
1064 DRM_DEBUG("target: (0x%08x) %s\n", target,
1065 (target ==
1066 MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1067 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1068 buf->used);
1069
1070 remaining = (buf->used - offset) >> 2; /* dwords remaining in buffer */
1071 used = bytes >> 2; /* dwords in buffer for this descriptor */
1072 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1073
1074 while (used) {
1075
1076 if (count == 0) {
1077 if (target == MACH64_BM_HOSTDATA) {
1078 reg = DMAREG(MACH64_HOST_DATA0);
1079 count =
1080 (remaining > 16) ? 16 : remaining;
1081 fifo = 0;
1082 } else {
1083 reg = le32_to_cpu(*buf_ptr++);
1084 used--;
1085 count = (reg >> 16) + 1;
1086 }
1087
1088 reg = reg & 0xffff;
1089 reg = MMSELECT(reg);
1090 }
1091 while (count && used) {
1092 if (!fifo) {
1093 if (no_idle_wait) {
1094 if ((ret =
1095 mach64_do_wait_for_fifo
1096 (dev_priv, 16)) < 0) {
1097 no_idle_wait = 0;
1098 return ret;
1099 }
1100 } else {
1101 if ((ret =
1102 mach64_do_wait_for_idle
1103 (dev_priv)) < 0) {
1104 return ret;
1105 }
1106 }
1107 fifo = 16;
1108 }
1109 --fifo;
1110 MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
1111 used--;
1112 remaining--;
1113
1114 reg += 4;
1115 count--;
1116 }
1117 }
1118 ring->head = head;
1119 ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
1120 ring->space += (4 * sizeof(u32));
1121 }
1122
1123 if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1124 return ret;
1125 }
1126 MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1127 ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
1128
1129 DRM_DEBUG("%s completed\n", __FUNCTION__);
1130 return 0;
1131 }
1132
1133 /*@}*/
1134
1135
1136 /*******************************************************************/
1137 /** \name DMA cleanup */
1138 /*@{*/
1139
1140 int mach64_do_cleanup_dma(drm_device_t * dev)
1141 {
1142 DRM_DEBUG("%s\n", __FUNCTION__);
1143
1144 /* Make sure interrupts are disabled here because the uninstall ioctl
1145 * may not have been called from userspace and after dev_private
1146 * is freed, it's too late.
1147 */
1148 if (dev->irq)
1149 drm_irq_uninstall(dev);
1150
1151 if (dev->dev_private) {
1152 drm_mach64_private_t *dev_priv = dev->dev_private;
1153
1154 if (dev_priv->is_pci) {
1155 if (dev_priv->ring.dmah) {
1156 drm_pci_free(dev, dev_priv->ring.dmah);
1157 }
1158 } else {
1159 if (dev_priv->ring_map)
1160 drm_core_ioremapfree(dev_priv->ring_map, dev);
1161 }
1162
1163 if (dev->agp_buffer_map) {
1164 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1165 dev->agp_buffer_map = NULL;
1166 }
1167
1168 mach64_destroy_freelist(dev);
1169
1170 drm_free(dev_priv, sizeof(drm_mach64_private_t),
1171 DRM_MEM_DRIVER);
1172 dev->dev_private = NULL;
1173 }
1174
1175 return 0;
1176 }
1177
1178 /*@}*/
1179
1180
1181 /*******************************************************************/
1182 /** \name IOCTL handlers */
1183 /*@{*/
1184
1185 int mach64_dma_init(DRM_IOCTL_ARGS)
1186 {
1187 DRM_DEVICE;
1188 drm_mach64_init_t init;
1189
1190 DRM_DEBUG("%s\n", __FUNCTION__);
1191
1192 LOCK_TEST_WITH_RETURN(dev, filp);
1193
1194 DRM_COPY_FROM_USER_IOCTL(init, (drm_mach64_init_t *) data,
1195 sizeof(init));
1196
1197 switch (init.func) {
1198 case DRM_MACH64_INIT_DMA:
1199 return mach64_do_dma_init(dev, &init);
1200 case DRM_MACH64_CLEANUP_DMA:
1201 return mach64_do_cleanup_dma(dev);
1202 }
1203
1204 return DRM_ERR(EINVAL);
1205 }
1206
/* DRM_MACH64_IDLE ioctl: thin wrapper that hands off to
 * mach64_do_dma_idle().  Caller must hold the DRM hardware lock
 * (enforced by LOCK_TEST_WITH_RETURN).
 */
int mach64_dma_idle(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return mach64_do_dma_idle(dev_priv);
}
1218
/* DRM_MACH64_FLUSH ioctl: thin wrapper that hands off to
 * mach64_do_dma_flush().  Caller must hold the DRM hardware lock
 * (enforced by LOCK_TEST_WITH_RETURN).
 */
int mach64_dma_flush(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return mach64_do_dma_flush(dev_priv);
}
1230
/* DRM_MACH64_RESET ioctl: thin wrapper that hands off to
 * mach64_do_engine_reset().  Caller must hold the DRM hardware lock
 * (enforced by LOCK_TEST_WITH_RETURN).
 */
int mach64_engine_reset(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return mach64_do_engine_reset(dev_priv);
}
1242
1243 /*@}*/
1244
1245
1246 /*******************************************************************/
1247 /** \name Freelist management */
1248 /*@{*/
1249
1250 int mach64_init_freelist(drm_device_t * dev)
1251 {
1252 drm_device_dma_t *dma = dev->dma;
1253 drm_mach64_private_t *dev_priv = dev->dev_private;
1254 drm_mach64_freelist_t *entry;
1255 struct list_head *ptr;
1256 int i;
1257
1258 DRM_DEBUG("%s: adding %d buffers to freelist\n", __FUNCTION__,
1259 dma->buf_count);
1260
1261 for (i = 0; i < dma->buf_count; i++) {
1262 if ((entry =
1263 (drm_mach64_freelist_t *)
1264 drm_alloc(sizeof(drm_mach64_freelist_t),
1265 DRM_MEM_BUFLISTS)) == NULL)
1266 return DRM_ERR(ENOMEM);
1267 memset(entry, 0, sizeof(drm_mach64_freelist_t));
1268 entry->buf = dma->buflist[i];
1269 ptr = &entry->list;
1270 list_add_tail(ptr, &dev_priv->free_list);
1271 }
1272
1273 return 0;
1274 }
1275
1276 void mach64_destroy_freelist(drm_device_t * dev)
1277 {
1278 drm_mach64_private_t *dev_priv = dev->dev_private;
1279 drm_mach64_freelist_t *entry;
1280 struct list_head *ptr;
1281 struct list_head *tmp;
1282
1283 DRM_DEBUG("%s\n", __FUNCTION__);
1284
1285 list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1286 list_del(ptr);
1287 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1288 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1289 }
1290 list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
1291 list_del(ptr);
1292 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1293 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1294 }
1295
1296 list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
1297 list_del(ptr);
1298 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1299 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1300 }
1301 }
1302
1303 /* IMPORTANT: This function should only be called when the engine is idle or locked up,
1304 * as it assumes all buffers in the pending list have been completed by the hardware.
1305 */
1306 int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv)
1307 {
1308 struct list_head *ptr;
1309 struct list_head *tmp;
1310 drm_mach64_freelist_t *entry;
1311 int i;
1312
1313 if (list_empty(&dev_priv->pending))
1314 return 0;
1315
1316 /* Iterate the pending list and move all buffers into the freelist... */
1317 i = 0;
1318 list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1319 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1320 if (entry->discard) {
1321 entry->buf->pending = 0;
1322 list_del(ptr);
1323 list_add_tail(ptr, &dev_priv->free_list);
1324 i++;
1325 }
1326 }
1327
1328 DRM_DEBUG("%s: released %d buffers from pending list\n", __FUNCTION__,
1329 i);
1330
1331 return 0;
1332 }
1333
/* Obtain a free DMA buffer, moving it to the placeholders list.
 *
 * Fast path: pop the head of free_list.  If free_list is empty, poll the
 * ring (up to usec_timeout iterations) for a pending buffer the hardware
 * has finished with, or — if the ring drains completely — release all
 * used buffers and retry the fast path.
 *
 * Returns the buffer, or NULL if both lists are empty or the wait for
 * the hardware times out.
 */
drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	struct list_head *tmp;
	int t;

	if (list_empty(&dev_priv->free_list)) {
		u32 head, tail, ofs;

		/* Nothing free and nothing in flight: every buffer must be
		 * sitting on the placeholders list — report and give up.
		 */
		if (list_empty(&dev_priv->pending)) {
			DRM_ERROR
			    ("Couldn't get buffer - pending and free lists empty\n");
			t = 0;
			list_for_each(ptr, &dev_priv->placeholders) {
				t++;
			}
			DRM_INFO("Placeholders: %d\n", t);
			return NULL;
		}

		/* Poll the ring head until a pending buffer completes. */
		tail = ring->tail;
		for (t = 0; t < dev_priv->usec_timeout; t++) {
			mach64_ring_tick(dev_priv, ring);
			head = ring->head;

			if (head == tail) {
#if MACH64_EXTRA_CHECKING
				if (MACH64_READ(MACH64_GUI_STAT) &
				    MACH64_GUI_ACTIVE) {
					DRM_ERROR
					    ("Empty ring with non-idle engine!\n");
					mach64_dump_ring_info(dev_priv);
					return NULL;
				}
#endif
				/* last pass is complete, so release everything */
				mach64_do_release_used_buffers(dev_priv);
				DRM_DEBUG
				    ("%s: idle engine, freed all buffers.\n",
				     __FUNCTION__);
				if (list_empty(&dev_priv->free_list)) {
					DRM_ERROR
					    ("Freelist empty with idle engine\n");
					return NULL;
				}
				goto _freelist_entry_found;
			}
			/* Look for a completed buffer and bail out of the loop
			 * as soon as we find one -- don't waste time trying
			 * to free extra bufs here, leave that to do_release_used_buffers
			 */
			list_for_each_safe(ptr, tmp, &dev_priv->pending) {
				entry =
				    list_entry(ptr, drm_mach64_freelist_t,
					       list);
				ofs = entry->ring_ofs;
				/* A buffer is done when its ring offset lies
				 * outside the [head, tail) window of
				 * still-unprocessed descriptors (two cases,
				 * depending on whether the window wraps).
				 */
				if (entry->discard &&
				    ((head < tail
				      && (ofs < head || ofs >= tail))
				     || (head > tail
					 && (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
					/* Paranoia: verify no unprocessed
					 * descriptor still points at this
					 * buffer's address.
					 */
					int i;

					for (i = head; i != tail;
					     i = (i + 4) & ring->tail_mask) {
						u32 o1 =
						    le32_to_cpu(((u32 *) ring->
								 start)[i + 1]);
						u32 o2 = GETBUFADDR(entry->buf);

						if (o1 == o2) {
							DRM_ERROR
							    ("Attempting to free used buffer: "
							     "i=%d buf=0x%08x\n",
							     i, o1);
							mach64_dump_ring_info
							    (dev_priv);
							return NULL;
						}
					}
#endif
					/* found a processed buffer */
					entry->buf->pending = 0;
					list_del(ptr);
					entry->buf->used = 0;
					/* Park the entry on placeholders; the
					 * buffer itself is handed to the
					 * caller.
					 */
					list_add_tail(ptr,
						      &dev_priv->placeholders);
					DRM_DEBUG
					    ("%s: freed processed buffer (head=%d tail=%d "
					     "buf ring ofs=%d).\n",
					     __FUNCTION__, head, tail, ofs);
					return entry->buf;
				}
			}
			DRM_UDELAY(1);
		}
		mach64_dump_ring_info(dev_priv);
		DRM_ERROR
		    ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
		     ring->head_addr, ring->head, ring->tail);
		return NULL;
	}

      _freelist_entry_found:
	/* Fast path: take the first free entry and park it on placeholders. */
	ptr = dev_priv->free_list.next;
	list_del(ptr);
	entry = list_entry(ptr, drm_mach64_freelist_t, list);
	entry->buf->used = 0;
	list_add_tail(ptr, &dev_priv->placeholders);
	return entry->buf;
}
1448
1449 /*@}*/
1450
1451
1452 /*******************************************************************/
1453 /** \name DMA buffer request and submission IOCTL handler */
1454 /*@{*/
1455
1456 static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev,
1457 drm_dma_t * d)
1458 {
1459 int i;
1460 drm_buf_t *buf;
1461 drm_mach64_private_t *dev_priv = dev->dev_private;
1462
1463 for (i = d->granted_count; i < d->request_count; i++) {
1464 buf = mach64_freelist_get(dev_priv);
1465 #if MACH64_EXTRA_CHECKING
1466 if (!buf)
1467 return DRM_ERR(EFAULT);
1468 #else
1469 if (!buf)
1470 return DRM_ERR(EAGAIN);
1471 #endif
1472
1473 buf->filp = filp;
1474
1475 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1476 sizeof(buf->idx)))
1477 return DRM_ERR(EFAULT);
1478 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1479 sizeof(buf->total)))
1480 return DRM_ERR(EFAULT);
1481
1482 d->granted_count++;
1483 }
1484 return 0;
1485 }
1486
1487 int mach64_dma_buffers(DRM_IOCTL_ARGS)
1488 {
1489 DRM_DEVICE;
1490 drm_device_dma_t *dma = dev->dma;
1491 drm_dma_t d;
1492 int ret = 0;
1493
1494 LOCK_TEST_WITH_RETURN(dev, filp);
1495
1496 DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d));
1497
1498 /* Please don't send us buffers.
1499 */
1500 if (d.send_count != 0) {
1501 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1502 DRM_CURRENTPID, d.send_count);
1503 return DRM_ERR(EINVAL);
1504 }
1505
1506 /* We'll send you buffers.
1507 */
1508 if (d.request_count < 0 || d.request_count > dma->buf_count) {
1509 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1510 DRM_CURRENTPID, d.request_count, dma->buf_count);
1511 ret = DRM_ERR(EINVAL);
1512 }
1513
1514 d.granted_count = 0;
1515
1516 if (d.request_count) {
1517 ret = mach64_dma_get_buffers(filp, dev, &d);
1518 }
1519
1520 DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d));
1521
1522 return ret;
1523 }
1524
/* DRM lastclose hook: release all DMA state when the final file handle
 * on the device is closed.
 */
void mach64_driver_lastclose(drm_device_t * dev)
{
	mach64_do_cleanup_dma(dev);
}
1529
1530 /*@}*/
Cache object: 2a41035fdc4b4468b67b65a504826e4a
|