1 /*-
2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/9.0/sys/dev/drm/radeon_cs.c 215367 2010-11-16 03:43:06Z nwhitehorn $");
30 #include "dev/drm/drmP.h"
31 #include "dev/drm/radeon_drm.h"
32 #include "dev/drm/radeon_drv.h"
33
/* regs */
/* Display controller vline registers — the only registers a packet0 may touch. */
#define AVIVO_D1MODE_VLINE_START_END                           0x6538
#define AVIVO_D2MODE_VLINE_START_END                           0x6d38
/* Registers that embed GPU addresses and therefore need relocation. */
#define R600_CP_COHER_BASE                                     0x85f8
#define R600_DB_DEPTH_BASE                                     0x2800c
#define R600_CB_COLOR0_BASE                                    0x28040
#define R600_CB_COLOR1_BASE                                    0x28044
#define R600_CB_COLOR2_BASE                                    0x28048
#define R600_CB_COLOR3_BASE                                    0x2804c
#define R600_CB_COLOR4_BASE                                    0x28050
#define R600_CB_COLOR5_BASE                                    0x28054
#define R600_CB_COLOR6_BASE                                    0x28058
#define R600_CB_COLOR7_BASE                                    0x2805c
#define R600_SQ_PGM_START_FS                                   0x28894
#define R600_SQ_PGM_START_ES                                   0x28880
#define R600_SQ_PGM_START_VS                                   0x28858
#define R600_SQ_PGM_START_GS                                   0x2886c
#define R600_SQ_PGM_START_PS                                   0x28840
#define R600_VGT_DMA_BASE                                      0x287e8
#define R600_VGT_DMA_BASE_HI                                   0x287e4
#define R600_VGT_STRMOUT_BASE_OFFSET_0                         0x28b10
#define R600_VGT_STRMOUT_BASE_OFFSET_1                         0x28b14
#define R600_VGT_STRMOUT_BASE_OFFSET_2                         0x28b18
#define R600_VGT_STRMOUT_BASE_OFFSET_3                         0x28b1c
#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0                      0x28b44
#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1                      0x28b48
#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2                      0x28b4c
#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3                      0x28b50
#define R600_VGT_STRMOUT_BUFFER_BASE_0                         0x28ad8
#define R600_VGT_STRMOUT_BUFFER_BASE_1                         0x28ae8
#define R600_VGT_STRMOUT_BUFFER_BASE_2                         0x28af8
#define R600_VGT_STRMOUT_BUFFER_BASE_3                         0x28b08
#define R600_VGT_STRMOUT_BUFFER_OFFSET_0                       0x28adc
#define R600_VGT_STRMOUT_BUFFER_OFFSET_1                       0x28aec
#define R600_VGT_STRMOUT_BUFFER_OFFSET_2                       0x28afc
#define R600_VGT_STRMOUT_BUFFER_OFFSET_3                       0x28b0c

/* resource type */
/* Top two bits of the 7th resource dword (see R600_IT_SET_RESOURCE handling). */
#define R600_SQ_TEX_VTX_INVALID_TEXTURE                        0x0
#define R600_SQ_TEX_VTX_INVALID_BUFFER                         0x1
#define R600_SQ_TEX_VTX_VALID_TEXTURE                          0x2
#define R600_SQ_TEX_VTX_VALID_BUFFER                           0x3

/* packet 3 type offsets */
/*
 * Valid [OFFSET, END) byte ranges for each SET_* packet family; the END
 * values are exclusive upper bounds (the parser rejects reg >= END).
 */
#define R600_SET_CONFIG_REG_OFFSET                             0x00008000
#define R600_SET_CONFIG_REG_END                                0x0000ac00
#define R600_SET_CONTEXT_REG_OFFSET                            0x00028000
#define R600_SET_CONTEXT_REG_END                               0x00029000
#define R600_SET_ALU_CONST_OFFSET                              0x00030000
#define R600_SET_ALU_CONST_END                                 0x00032000
#define R600_SET_RESOURCE_OFFSET                               0x00038000
#define R600_SET_RESOURCE_END                                  0x0003c000
#define R600_SET_SAMPLER_OFFSET                                0x0003c000
#define R600_SET_SAMPLER_END                                   0x0003cff0
#define R600_SET_CTL_CONST_OFFSET                              0x0003cff0
#define R600_SET_CTL_CONST_END                                 0x0003e200
#define R600_SET_LOOP_CONST_OFFSET                             0x0003e200
#define R600_SET_LOOP_CONST_END                                0x0003e380
#define R600_SET_BOOL_CONST_OFFSET                             0x0003e380
#define R600_SET_BOOL_CONST_END                                0x00040000

/* Packet 3 types */
/* Opcode field (bits 8-15) of a type-3 packet header. */
#define R600_IT_INDIRECT_BUFFER_END                            0x00001700
#define R600_IT_SET_PREDICATION                                0x00002000
#define R600_IT_REG_RMW                                        0x00002100
#define R600_IT_COND_EXEC                                      0x00002200
#define R600_IT_PRED_EXEC                                      0x00002300
#define R600_IT_START_3D_CMDBUF                                0x00002400
#define R600_IT_DRAW_INDEX_2                                   0x00002700
#define R600_IT_CONTEXT_CONTROL                                0x00002800
#define R600_IT_DRAW_INDEX_IMMD_BE                             0x00002900
#define R600_IT_INDEX_TYPE                                     0x00002A00
#define R600_IT_DRAW_INDEX                                     0x00002B00
#define R600_IT_DRAW_INDEX_AUTO                                0x00002D00
#define R600_IT_DRAW_INDEX_IMMD                                0x00002E00
#define R600_IT_NUM_INSTANCES                                  0x00002F00
#define R600_IT_STRMOUT_BUFFER_UPDATE                          0x00003400
#define R600_IT_INDIRECT_BUFFER_MP                             0x00003800
#define R600_IT_MEM_SEMAPHORE                                  0x00003900
#define R600_IT_MPEG_INDEX                                     0x00003A00
#define R600_IT_WAIT_REG_MEM                                   0x00003C00
#define R600_IT_MEM_WRITE                                      0x00003D00
#define R600_IT_INDIRECT_BUFFER                                0x00003200
#define R600_IT_CP_INTERRUPT                                   0x00004000
#define R600_IT_SURFACE_SYNC                                   0x00004300
#define R600_IT_ME_INITIALIZE                                  0x00004400
#define R600_IT_COND_WRITE                                     0x00004500
#define R600_IT_EVENT_WRITE                                    0x00004600
#define R600_IT_EVENT_WRITE_EOP                                0x00004700
#define R600_IT_ONE_REG_WRITE                                  0x00005700
#define R600_IT_SET_CONFIG_REG                                 0x00006800
#define R600_IT_SET_CONTEXT_REG                                0x00006900
#define R600_IT_SET_ALU_CONST                                  0x00006A00
#define R600_IT_SET_BOOL_CONST                                 0x00006B00
#define R600_IT_SET_LOOP_CONST                                 0x00006C00
#define R600_IT_SET_RESOURCE                                   0x00006D00
#define R600_IT_SET_SAMPLER                                    0x00006E00
#define R600_IT_SET_CTL_CONST                                  0x00006F00
#define R600_IT_SURFACE_BASE_UPDATE                            0x00007300
134 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
135 {
136 struct drm_radeon_cs_parser parser;
137 struct drm_radeon_private *dev_priv = dev->dev_private;
138 struct drm_radeon_cs *cs = data;
139 uint32_t cs_id;
140 struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
141 uint64_t *chunk_array;
142 uint64_t *chunk_array_ptr;
143 long size;
144 int r, i;
145
146 mtx_lock(&dev_priv->cs.cs_mutex);
147 /* set command stream id to 0 which is fake id */
148 cs_id = 0;
149 cs->cs_id = cs_id;
150
151 if (dev_priv == NULL) {
152 DRM_ERROR("called with no initialization\n");
153 mtx_unlock(&dev_priv->cs.cs_mutex);
154 return -EINVAL;
155 }
156 if (!cs->num_chunks) {
157 mtx_unlock(&dev_priv->cs.cs_mutex);
158 return 0;
159 }
160
161
162 chunk_array = drm_calloc(cs->num_chunks, sizeof(uint64_t), DRM_MEM_DRIVER);
163 if (!chunk_array) {
164 mtx_unlock(&dev_priv->cs.cs_mutex);
165 return -ENOMEM;
166 }
167
168 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
169
170 if (DRM_COPY_FROM_USER(chunk_array, chunk_array_ptr, sizeof(uint64_t)*cs->num_chunks)) {
171 r = -EFAULT;
172 goto out;
173 }
174
175 parser.dev = dev;
176 parser.file_priv = fpriv;
177 parser.reloc_index = -1;
178 parser.ib_index = -1;
179 parser.num_chunks = cs->num_chunks;
180 /* copy out the chunk headers */
181 parser.chunks = drm_calloc(parser.num_chunks, sizeof(struct drm_radeon_kernel_chunk), DRM_MEM_DRIVER);
182 if (!parser.chunks) {
183 r = -ENOMEM;
184 goto out;
185 }
186
187 for (i = 0; i < parser.num_chunks; i++) {
188 struct drm_radeon_cs_chunk user_chunk;
189
190 chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
191
192 if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr, sizeof(struct drm_radeon_cs_chunk))){
193 r = -EFAULT;
194 goto out;
195 }
196 parser.chunks[i].chunk_id = user_chunk.chunk_id;
197
198 if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS)
199 parser.reloc_index = i;
200
201 if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_IB)
202 parser.ib_index = i;
203
204 if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_OLD) {
205 parser.ib_index = i;
206 parser.reloc_index = -1;
207 }
208
209 parser.chunks[i].length_dw = user_chunk.length_dw;
210 parser.chunks[i].chunk_data = (uint32_t *)(unsigned long)user_chunk.chunk_data;
211
212 parser.chunks[i].kdata = NULL;
213 size = parser.chunks[i].length_dw * sizeof(uint32_t);
214
215 switch(parser.chunks[i].chunk_id) {
216 case RADEON_CHUNK_ID_IB:
217 case RADEON_CHUNK_ID_OLD:
218 if (size == 0) {
219 r = -EINVAL;
220 goto out;
221 }
222 case RADEON_CHUNK_ID_RELOCS:
223 if (size) {
224 parser.chunks[i].kdata = drm_alloc(size, DRM_MEM_DRIVER);
225 if (!parser.chunks[i].kdata) {
226 r = -ENOMEM;
227 goto out;
228 }
229
230 if (DRM_COPY_FROM_USER(parser.chunks[i].kdata, parser.chunks[i].chunk_data, size)) {
231 r = -EFAULT;
232 goto out;
233 }
234 } else
235 parser.chunks[i].kdata = NULL;
236 break;
237 default:
238 break;
239 }
240 DRM_DEBUG("chunk %d %d %d %p\n", i, parser.chunks[i].chunk_id, parser.chunks[i].length_dw,
241 parser.chunks[i].chunk_data);
242 }
243
244 if (parser.chunks[parser.ib_index].length_dw > (16 * 1024)) {
245 DRM_ERROR("cs->dwords too big: %d\n", parser.chunks[parser.ib_index].length_dw);
246 r = -EINVAL;
247 goto out;
248 }
249
250 /* get ib */
251 r = dev_priv->cs.ib_get(&parser);
252 if (r) {
253 DRM_ERROR("ib_get failed\n");
254 goto out;
255 }
256
257 /* now parse command stream */
258 r = dev_priv->cs.parse(&parser);
259 if (r) {
260 goto out;
261 }
262
263 out:
264 dev_priv->cs.ib_free(&parser, r);
265
266 /* emit cs id sequence */
267 dev_priv->cs.id_emit(&parser, &cs_id);
268
269 cs->cs_id = cs_id;
270
271 mtx_unlock(&dev_priv->cs.cs_mutex);
272
273 for (i = 0; i < parser.num_chunks; i++) {
274 if (parser.chunks[i].kdata)
275 drm_free(parser.chunks[i].kdata, parser.chunks[i].length_dw * sizeof(uint32_t), DRM_MEM_DRIVER);
276 }
277
278 drm_free(parser.chunks, sizeof(struct drm_radeon_kernel_chunk)*parser.num_chunks, DRM_MEM_DRIVER);
279 drm_free(chunk_array, sizeof(uint64_t)*parser.num_chunks, DRM_MEM_DRIVER);
280
281 return r;
282 }
283
284 /* for non-mm */
285 static int r600_nomm_relocate(struct drm_radeon_cs_parser *parser, uint32_t *reloc, uint64_t *offset)
286 {
287 struct drm_device *dev = parser->dev;
288 drm_radeon_private_t *dev_priv = dev->dev_private;
289 struct drm_radeon_kernel_chunk *reloc_chunk = &parser->chunks[parser->reloc_index];
290 uint32_t offset_dw = reloc[1];
291
292 //DRM_INFO("reloc: 0x%08x 0x%08x\n", reloc[0], reloc[1]);
293 //DRM_INFO("length: %d\n", reloc_chunk->length_dw);
294
295 if (!reloc_chunk->kdata)
296 return -EINVAL;
297
298 if (offset_dw > reloc_chunk->length_dw) {
299 DRM_ERROR("Offset larger than chunk 0x%x %d\n", offset_dw, reloc_chunk->length_dw);
300 return -EINVAL;
301 }
302
303 /* 40 bit addr */
304 *offset = reloc_chunk->kdata[offset_dw + 3];
305 *offset <<= 32;
306 *offset |= reloc_chunk->kdata[offset_dw + 0];
307
308 //DRM_INFO("offset 0x%lx\n", *offset);
309
310 if (!radeon_check_offset(dev_priv, *offset)) {
311 DRM_ERROR("bad offset! 0x%lx\n", (unsigned long)*offset);
312 return -EINVAL;
313 }
314
315 return 0;
316 }
317
318 static inline int r600_cs_packet0(struct drm_radeon_cs_parser *parser, uint32_t *offset_dw_p)
319 {
320 uint32_t hdr, num_dw, reg;
321 int count_dw = 1;
322 int ret = 0;
323 uint32_t offset_dw = *offset_dw_p;
324 int incr = 2;
325
326 hdr = parser->chunks[parser->ib_index].kdata[offset_dw];
327 num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2;
328 reg = (hdr & 0xffff) << 2;
329
330 while (count_dw < num_dw) {
331 switch (reg) {
332 case AVIVO_D1MODE_VLINE_START_END:
333 case AVIVO_D2MODE_VLINE_START_END:
334 break;
335 default:
336 ret = -EINVAL;
337 DRM_ERROR("bad packet 0 reg: 0x%08x\n", reg);
338 break;
339 }
340 if (ret)
341 break;
342 count_dw++;
343 reg += 4;
344 }
345 *offset_dw_p += incr;
346 return ret;
347 }
348
349 static inline int r600_cs_packet3(struct drm_radeon_cs_parser *parser, uint32_t *offset_dw_p)
350 {
351 struct drm_device *dev = parser->dev;
352 drm_radeon_private_t *dev_priv = dev->dev_private;
353 uint32_t hdr, num_dw, start_reg, end_reg, reg;
354 uint32_t *reloc;
355 uint64_t offset;
356 int ret = 0;
357 uint32_t offset_dw = *offset_dw_p;
358 int incr = 2;
359 int i;
360 struct drm_radeon_kernel_chunk *ib_chunk;
361
362 ib_chunk = &parser->chunks[parser->ib_index];
363
364 hdr = ib_chunk->kdata[offset_dw];
365 num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2;
366
367 /* just the ones we use for now, add more later */
368 switch (hdr & 0xff00) {
369 case R600_IT_START_3D_CMDBUF:
370 //DRM_INFO("R600_IT_START_3D_CMDBUF\n");
371 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
372 ret = -EINVAL;
373 if (num_dw != 2)
374 ret = -EINVAL;
375 if (ret)
376 DRM_ERROR("bad START_3D\n");
377 break;
378 case R600_IT_CONTEXT_CONTROL:
379 //DRM_INFO("R600_IT_CONTEXT_CONTROL\n");
380 if (num_dw != 3)
381 ret = -EINVAL;
382 if (ret)
383 DRM_ERROR("bad CONTEXT_CONTROL\n");
384 break;
385 case R600_IT_INDEX_TYPE:
386 case R600_IT_NUM_INSTANCES:
387 //DRM_INFO("R600_IT_INDEX_TYPE/R600_IT_NUM_INSTANCES\n");
388 if (num_dw != 2)
389 ret = -EINVAL;
390 if (ret)
391 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
392 break;
393 case R600_IT_DRAW_INDEX:
394 //DRM_INFO("R600_IT_DRAW_INDEX\n");
395 if (num_dw != 5) {
396 ret = -EINVAL;
397 DRM_ERROR("bad DRAW_INDEX\n");
398 break;
399 }
400 reloc = ib_chunk->kdata + offset_dw + num_dw;
401 ret = dev_priv->cs.relocate(parser, reloc, &offset);
402 if (ret) {
403 DRM_ERROR("bad DRAW_INDEX\n");
404 break;
405 }
406 ib_chunk->kdata[offset_dw + 1] += (offset & 0xffffffff);
407 ib_chunk->kdata[offset_dw + 2] += (upper_32_bits(offset) & 0xff);
408 break;
409 case R600_IT_DRAW_INDEX_AUTO:
410 //DRM_INFO("R600_IT_DRAW_INDEX_AUTO\n");
411 if (num_dw != 3)
412 ret = -EINVAL;
413 if (ret)
414 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
415 break;
416 case R600_IT_DRAW_INDEX_IMMD_BE:
417 case R600_IT_DRAW_INDEX_IMMD:
418 //DRM_INFO("R600_IT_DRAW_INDEX_IMMD\n");
419 if (num_dw < 4)
420 ret = -EINVAL;
421 if (ret)
422 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
423 break;
424 case R600_IT_WAIT_REG_MEM:
425 //DRM_INFO("R600_IT_WAIT_REG_MEM\n");
426 if (num_dw != 7)
427 ret = -EINVAL;
428 /* bit 4 is reg (0) or mem (1) */
429 if (ib_chunk->kdata[offset_dw + 1] & 0x10) {
430 reloc = ib_chunk->kdata + offset_dw + num_dw;
431 ret = dev_priv->cs.relocate(parser, reloc, &offset);
432 if (ret) {
433 DRM_ERROR("bad WAIT_REG_MEM\n");
434 break;
435 }
436 ib_chunk->kdata[offset_dw + 2] += (offset & 0xffffffff);
437 ib_chunk->kdata[offset_dw + 3] += (upper_32_bits(offset) & 0xff);
438 }
439 if (ret)
440 DRM_ERROR("bad WAIT_REG_MEM\n");
441 break;
442 case R600_IT_SURFACE_SYNC:
443 //DRM_INFO("R600_IT_SURFACE_SYNC\n");
444 if (num_dw != 5)
445 ret = -EINVAL;
446 /* 0xffffffff/0x0 is flush all cache flag */
447 else if ((ib_chunk->kdata[offset_dw + 2] == 0xffffffff) &&
448 (ib_chunk->kdata[offset_dw + 3] == 0))
449 ret = 0;
450 else {
451 reloc = ib_chunk->kdata + offset_dw + num_dw;
452 ret = dev_priv->cs.relocate(parser, reloc, &offset);
453 if (ret) {
454 DRM_ERROR("bad SURFACE_SYNC\n");
455 break;
456 }
457 ib_chunk->kdata[offset_dw + 3] += ((offset >> 8) & 0xffffffff);
458 }
459 break;
460 case R600_IT_EVENT_WRITE:
461 //DRM_INFO("R600_IT_EVENT_WRITE\n");
462 if ((num_dw != 4) && (num_dw != 2))
463 ret = -EINVAL;
464 if (num_dw > 2) {
465 reloc = ib_chunk->kdata + offset_dw + num_dw;
466 ret = dev_priv->cs.relocate(parser, reloc, &offset);
467 if (ret) {
468 DRM_ERROR("bad EVENT_WRITE\n");
469 break;
470 }
471 ib_chunk->kdata[offset_dw + 2] += (offset & 0xffffffff);
472 ib_chunk->kdata[offset_dw + 3] += (upper_32_bits(offset) & 0xff);
473 }
474 if (ret)
475 DRM_ERROR("bad EVENT_WRITE\n");
476 break;
477 case R600_IT_EVENT_WRITE_EOP:
478 //DRM_INFO("R600_IT_EVENT_WRITE_EOP\n");
479 if (num_dw != 6) {
480 ret = -EINVAL;
481 DRM_ERROR("bad EVENT_WRITE_EOP\n");
482 break;
483 }
484 reloc = ib_chunk->kdata + offset_dw + num_dw;
485 ret = dev_priv->cs.relocate(parser, reloc, &offset);
486 if (ret) {
487 DRM_ERROR("bad EVENT_WRITE_EOP\n");
488 break;
489 }
490 ib_chunk->kdata[offset_dw + 2] += (offset & 0xffffffff);
491 ib_chunk->kdata[offset_dw + 3] += (upper_32_bits(offset) & 0xff);
492 break;
493 case R600_IT_SET_CONFIG_REG:
494 //DRM_INFO("R600_IT_SET_CONFIG_REG\n");
495 start_reg = (ib_chunk->kdata[offset_dw + 1] << 2) + R600_SET_CONFIG_REG_OFFSET;
496 end_reg = 4 * (num_dw - 2) + start_reg - 4;
497 if ((start_reg < R600_SET_CONFIG_REG_OFFSET) ||
498 (start_reg >= R600_SET_CONFIG_REG_END) ||
499 (end_reg >= R600_SET_CONFIG_REG_END))
500 ret = -EINVAL;
501 else {
502 for (i = 0; i < (num_dw - 2); i++) {
503 reg = start_reg + (4 * i);
504 switch (reg) {
505 case R600_CP_COHER_BASE:
506 /* use R600_IT_SURFACE_SYNC */
507 ret = -EINVAL;
508 break;
509 default:
510 break;
511 }
512 if (ret)
513 break;
514 }
515 }
516 if (ret)
517 DRM_ERROR("bad SET_CONFIG_REG\n");
518 break;
519 case R600_IT_SET_CONTEXT_REG:
520 //DRM_INFO("R600_IT_SET_CONTEXT_REG\n");
521 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
522 start_reg += R600_SET_CONTEXT_REG_OFFSET;
523 end_reg = 4 * (num_dw - 2) + start_reg - 4;
524 if ((start_reg < R600_SET_CONTEXT_REG_OFFSET) ||
525 (start_reg >= R600_SET_CONTEXT_REG_END) ||
526 (end_reg >= R600_SET_CONTEXT_REG_END))
527 ret = -EINVAL;
528 else {
529 for (i = 0; i < (num_dw - 2); i++) {
530 reg = start_reg + (4 * i);
531 switch (reg) {
532 case R600_DB_DEPTH_BASE:
533 case R600_CB_COLOR0_BASE:
534 case R600_CB_COLOR1_BASE:
535 case R600_CB_COLOR2_BASE:
536 case R600_CB_COLOR3_BASE:
537 case R600_CB_COLOR4_BASE:
538 case R600_CB_COLOR5_BASE:
539 case R600_CB_COLOR6_BASE:
540 case R600_CB_COLOR7_BASE:
541 case R600_SQ_PGM_START_FS:
542 case R600_SQ_PGM_START_ES:
543 case R600_SQ_PGM_START_VS:
544 case R600_SQ_PGM_START_GS:
545 case R600_SQ_PGM_START_PS:
546 //DRM_INFO("reg: 0x%08x\n", reg);
547 reloc = ib_chunk->kdata + offset_dw + num_dw + (i * 2);
548 ret = dev_priv->cs.relocate(parser, reloc, &offset);
549 if (ret) {
550 DRM_ERROR("bad SET_CONTEXT_REG\n");
551 break;
552 }
553 ib_chunk->kdata[offset_dw + 2 + i] +=
554 ((offset >> 8) & 0xffffffff);
555 break;
556 case R600_VGT_DMA_BASE:
557 case R600_VGT_DMA_BASE_HI:
558 /* These should be handled by DRAW_INDEX packet 3 */
559 case R600_VGT_STRMOUT_BASE_OFFSET_0:
560 case R600_VGT_STRMOUT_BASE_OFFSET_1:
561 case R600_VGT_STRMOUT_BASE_OFFSET_2:
562 case R600_VGT_STRMOUT_BASE_OFFSET_3:
563 case R600_VGT_STRMOUT_BASE_OFFSET_HI_0:
564 case R600_VGT_STRMOUT_BASE_OFFSET_HI_1:
565 case R600_VGT_STRMOUT_BASE_OFFSET_HI_2:
566 case R600_VGT_STRMOUT_BASE_OFFSET_HI_3:
567 case R600_VGT_STRMOUT_BUFFER_BASE_0:
568 case R600_VGT_STRMOUT_BUFFER_BASE_1:
569 case R600_VGT_STRMOUT_BUFFER_BASE_2:
570 case R600_VGT_STRMOUT_BUFFER_BASE_3:
571 case R600_VGT_STRMOUT_BUFFER_OFFSET_0:
572 case R600_VGT_STRMOUT_BUFFER_OFFSET_1:
573 case R600_VGT_STRMOUT_BUFFER_OFFSET_2:
574 case R600_VGT_STRMOUT_BUFFER_OFFSET_3:
575 /* These should be handled by STRMOUT_BUFFER packet 3 */
576 DRM_ERROR("bad context reg: 0x%08x\n", reg);
577 ret = -EINVAL;
578 break;
579 default:
580 break;
581 }
582 if (ret)
583 break;
584 }
585 }
586 if (ret)
587 DRM_ERROR("bad SET_CONTEXT_REG\n");
588 break;
589 case R600_IT_SET_RESOURCE:
590 //DRM_INFO("R600_IT_SET_RESOURCE\n");
591 if ((num_dw - 2) % 7)
592 ret = -EINVAL;
593 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
594 start_reg += R600_SET_RESOURCE_OFFSET;
595 end_reg = 4 * (num_dw - 2) + start_reg - 4;
596 if ((start_reg < R600_SET_RESOURCE_OFFSET) ||
597 (start_reg >= R600_SET_RESOURCE_END) ||
598 (end_reg >= R600_SET_RESOURCE_END))
599 ret = -EINVAL;
600 else {
601 for (i = 0; i < ((num_dw - 2) / 7); i++) {
602 switch ((ib_chunk->kdata[offset_dw + (i * 7) + 6 + 2] & 0xc0000000) >> 30) {
603 case R600_SQ_TEX_VTX_INVALID_TEXTURE:
604 case R600_SQ_TEX_VTX_INVALID_BUFFER:
605 default:
606 ret = -EINVAL;
607 break;
608 case R600_SQ_TEX_VTX_VALID_TEXTURE:
609 /* tex base */
610 reloc = ib_chunk->kdata + offset_dw + num_dw + (i * 4);
611 ret = dev_priv->cs.relocate(parser, reloc, &offset);
612 if (ret)
613 break;
614 ib_chunk->kdata[offset_dw + (i * 7) + 2 + 2] +=
615 ((offset >> 8) & 0xffffffff);
616 /* tex mip base */
617 reloc = ib_chunk->kdata + offset_dw + num_dw + (i * 4) + 2;
618 ret = dev_priv->cs.relocate(parser, reloc, &offset);
619 if (ret)
620 break;
621 ib_chunk->kdata[offset_dw + (i * 7) + 3 + 2] +=
622 ((offset >> 8) & 0xffffffff);
623 break;
624 case R600_SQ_TEX_VTX_VALID_BUFFER:
625 /* vtx base */
626 reloc = ib_chunk->kdata + offset_dw + num_dw + (i * 2);
627 ret = dev_priv->cs.relocate(parser, reloc, &offset);
628 if (ret)
629 break;
630 ib_chunk->kdata[offset_dw + (i * 7) + 0 + 2] += (offset & 0xffffffff);
631 ib_chunk->kdata[offset_dw + (i * 7) + 2 + 2] += (upper_32_bits(offset) & 0xff);
632 break;
633 }
634 if (ret)
635 break;
636 }
637 }
638 if (ret)
639 DRM_ERROR("bad SET_RESOURCE\n");
640 break;
641 case R600_IT_SET_ALU_CONST:
642 //DRM_INFO("R600_IT_SET_ALU_CONST\n");
643 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
644 start_reg += R600_SET_ALU_CONST_OFFSET;
645 end_reg = 4 * (num_dw - 2) + start_reg - 4;
646 if ((start_reg < R600_SET_ALU_CONST_OFFSET) ||
647 (start_reg >= R600_SET_ALU_CONST_END) ||
648 (end_reg >= R600_SET_ALU_CONST_END))
649 ret = -EINVAL;
650 if (ret)
651 DRM_ERROR("bad SET_ALU_CONST\n");
652 break;
653 case R600_IT_SET_BOOL_CONST:
654 //DRM_INFO("R600_IT_SET_BOOL_CONST\n");
655 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
656 start_reg += R600_SET_BOOL_CONST_OFFSET;
657 end_reg = 4 * (num_dw - 2) + start_reg - 4;
658 if ((start_reg < R600_SET_BOOL_CONST_OFFSET) ||
659 (start_reg >= R600_SET_BOOL_CONST_END) ||
660 (end_reg >= R600_SET_BOOL_CONST_END))
661 ret = -EINVAL;
662 if (ret)
663 DRM_ERROR("bad SET_BOOL_CONST\n");
664 break;
665 case R600_IT_SET_LOOP_CONST:
666 //DRM_INFO("R600_IT_SET_LOOP_CONST\n");
667 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
668 start_reg += R600_SET_LOOP_CONST_OFFSET;
669 end_reg = 4 * (num_dw - 2) + start_reg - 4;
670 if ((start_reg < R600_SET_LOOP_CONST_OFFSET) ||
671 (start_reg >= R600_SET_LOOP_CONST_END) ||
672 (end_reg >= R600_SET_LOOP_CONST_END))
673 ret = -EINVAL;
674 if (ret)
675 DRM_ERROR("bad SET_LOOP_CONST\n");
676 break;
677 case R600_IT_SET_CTL_CONST:
678 //DRM_INFO("R600_IT_SET_CTL_CONST\n");
679 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
680 start_reg += R600_SET_CTL_CONST_OFFSET;
681 end_reg = 4 * (num_dw - 2) + start_reg - 4;
682 if ((start_reg < R600_SET_CTL_CONST_OFFSET) ||
683 (start_reg >= R600_SET_CTL_CONST_END) ||
684 (end_reg >= R600_SET_CTL_CONST_END))
685 ret = -EINVAL;
686 if (ret)
687 DRM_ERROR("bad SET_CTL_CONST\n");
688 break;
689 case R600_IT_SET_SAMPLER:
690 //DRM_INFO("R600_IT_SET_SAMPLER\n");
691 if ((num_dw - 2) % 3)
692 ret = -EINVAL;
693 start_reg = ib_chunk->kdata[offset_dw + 1] << 2;
694 start_reg += R600_SET_SAMPLER_OFFSET;
695 end_reg = 4 * (num_dw - 2) + start_reg - 4;
696 if ((start_reg < R600_SET_SAMPLER_OFFSET) ||
697 (start_reg >= R600_SET_SAMPLER_END) ||
698 (end_reg >= R600_SET_SAMPLER_END))
699 ret = -EINVAL;
700 if (ret)
701 DRM_ERROR("bad SET_SAMPLER\n");
702 break;
703 case R600_IT_SURFACE_BASE_UPDATE:
704 //DRM_INFO("R600_IT_SURFACE_BASE_UPDATE\n");
705 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) ||
706 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600))
707 ret = -EINVAL;
708 if (num_dw != 2)
709 ret = -EINVAL;
710 if (ret)
711 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
712 break;
713 case RADEON_CP_NOP:
714 //DRM_INFO("NOP: %d\n", ib_chunk->kdata[offset_dw + 1]);
715 break;
716 default:
717 DRM_ERROR("invalid packet 3 0x%08x\n", 0xff00);
718 ret = -EINVAL;
719 break;
720 }
721
722 *offset_dw_p += incr;
723 return ret;
724 }
725
/*
 * Validate the whole IB chunk packet by packet, then copy the (possibly
 * relocated) dwords into the hardware indirect buffer.  Returns 0 on
 * success or the first packet validator's error code.
 */
static int r600_cs_parse(struct drm_radeon_cs_parser *parser)
{
	volatile int rb;	/* dummy read target; see WC flush below */
	struct drm_radeon_kernel_chunk *ib_chunk;
	/* scan the packet for various things */
	int count_dw = 0, size_dw;
	int ret = 0;

	ib_chunk = &parser->chunks[parser->ib_index];
	size_dw = ib_chunk->length_dw;

	/*
	 * Dword accounting: the packet0/packet3 helpers advance count_dw by 2
	 * (header + one dword), and the header's count field (num_dw) is
	 * added below, which together step over exactly one whole packet
	 * (a type-N packet is count+2 dwords).  For type-2 filler packets
	 * (one dword) only num_dw+1 is added.
	 */
	while (count_dw < size_dw && ret == 0) {
		int hdr = ib_chunk->kdata[count_dw];
		int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;

		switch (hdr & RADEON_CP_PACKET_MASK) {
		case RADEON_CP_PACKET0:
			ret = r600_cs_packet0(parser, &count_dw);
			break;
		case RADEON_CP_PACKET1:
			/* type-1 packets are never valid in a user IB */
			ret = -EINVAL;
			break;
		case RADEON_CP_PACKET2:
			DRM_DEBUG("Packet 2\n");
			num_dw += 1;
			break;
		case RADEON_CP_PACKET3:
			ret = r600_cs_packet3(parser, &count_dw);
			break;
		}

		count_dw += num_dw;
	}

	if (ret)
		return ret;


	/* copy the packet into the IB */
	memcpy(parser->ib, ib_chunk->kdata, ib_chunk->length_dw * sizeof(uint32_t));

	/* read back last byte to flush WC buffers */
	rb = *(volatile u_int32_t *) (((vm_offset_t)parser->ib + (ib_chunk->length_dw-1) * sizeof(uint32_t)));

	return 0;
}
772
773 static uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
774 {
775 /* FIXME: protect with a spinlock */
776 /* FIXME: check if wrap affect last reported wrap & sequence */
777 radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
778 if (!radeon->cs.id_scnt) {
779 /* increment wrap counter */
780 radeon->cs.id_wcnt += 0x01000000;
781 /* valid sequence counter start at 1 */
782 radeon->cs.id_scnt = 1;
783 }
784 return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
785 }
786
/*
 * Allocate a fresh CS id and emit it to the ring via scratch register 2
 * (R600_CLEAR_AGE), so completion can later be detected by reading the
 * scratch register back.  The new id is returned through *id.
 */
static void r600_cs_id_emit(struct drm_radeon_cs_parser *parser, uint32_t *id)
{
	drm_radeon_private_t *dev_priv = parser->dev->dev_private;
	RING_LOCALS;	/* ring-buffer cursor locals used by the BEGIN/ADVANCE macros */

	//dev_priv->irq_emitted = radeon_update_breadcrumb(parser->dev);

	*id = radeon_cs_id_get(dev_priv);

	/* SCRATCH 2 */
	BEGIN_RING(3);
	R600_CLEAR_AGE(*id);
	ADVANCE_RING();
	COMMIT_RING();
}
802
803 static uint32_t r600_cs_id_last_get(struct drm_device *dev)
804 {
805 //drm_radeon_private_t *dev_priv = dev->dev_private;
806
807 //return GET_R600_SCRATCH(dev_priv, 2);
808 return 0;
809 }
810
/*
 * Acquire a DMA buffer from the freelist to serve as the indirect buffer
 * for this submission.  On success the buffer is stashed in
 * dev_priv->cs_buf and parser->ib points at its CPU mapping inside the
 * AGP buffer map; returns -EBUSY when the freelist is empty.
 */
static int r600_ib_get(struct drm_radeon_cs_parser *parser)
{
	struct drm_device *dev = parser->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;

	buf = radeon_freelist_get(dev);
	if (!buf) {
		dev_priv->cs_buf = NULL;
		return -EBUSY;
	}
	buf->file_priv = parser->file_priv;
	dev_priv->cs_buf = buf;
	/* CPU-visible address of the buffer within the mapped AGP aperture */
	parser->ib = (void *)((vm_offset_t)dev->agp_buffer_map->virtual +
	    buf->offset);

	return 0;
}
829
830 static void r600_ib_free(struct drm_radeon_cs_parser *parser, int error)
831 {
832 struct drm_device *dev = parser->dev;
833 drm_radeon_private_t *dev_priv = dev->dev_private;
834 struct drm_buf *buf = dev_priv->cs_buf;
835
836 if (buf) {
837 if (!error)
838 r600_cp_dispatch_indirect(dev, buf, 0,
839 parser->chunks[parser->ib_index].length_dw * sizeof(uint32_t));
840 radeon_cp_discard_buffer(dev, buf);
841 COMMIT_RING();
842 }
843 }
844
845 int r600_cs_init(struct drm_device *dev)
846 {
847 drm_radeon_private_t *dev_priv = dev->dev_private;
848
849 dev_priv->cs.ib_get = r600_ib_get;
850 dev_priv->cs.ib_free = r600_ib_free;
851 dev_priv->cs.id_emit = r600_cs_id_emit;
852 dev_priv->cs.id_last_get = r600_cs_id_last_get;
853 dev_priv->cs.parse = r600_cs_parse;
854 dev_priv->cs.relocate = r600_nomm_relocate;
855 return 0;
856 }
Cache object: 771b8d49418df60e5767357f0aa5c699
|