/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
/*-
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Kevin E. Martin <martin@valinux.com>
 *
 * $FreeBSD$
 */

#include "dev/drm/radeon.h"
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
#include "dev/drm/radeon_drm.h"
#include "dev/drm/radeon_drv.h"


/* ================================================================
 * Helper functions for client state checking and fixup
 */

static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv,
						     drm_file_t *filp_priv,
						     u32 *offset ) {
	u32 off = *offset;

	if ( off >= dev_priv->fb_location &&
	     off < ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
		return 0;

	off += filp_priv->radeon_fb_delta;

	DRM_DEBUG( "offset fixed up to 0x%x\n", off );

	if ( off < dev_priv->fb_location ||
	     off >= ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
		return DRM_ERR( EINVAL );

	*offset = off;

	return 0;
}
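
/* A sketch of the fixup arithmetic above, using made-up values: if
 * fb_location is 0x02000000 and a client was set up against a server
 * that mapped the framebuffer at 0, radeon_fb_delta would be
 * 0x02000000, so a client offset of 0x1000 is fixed up to 0x02001000
 * before being range-checked again.  Illustrative only, not values
 * from real hardware.
 */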

static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
							  drm_file_t *filp_priv,
							  u32 *offset ) {
	u32 off;

	DRM_GET_USER_UNCHECKED( off, offset );

	if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &off ) )
		return DRM_ERR( EINVAL );

	DRM_PUT_USER_UNCHECKED( offset, off );

	return 0;
}
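
/* Note: this variant reads and rewrites a register value that still
 * lives in user space, via the UNCHECKED accessors; the callers are
 * presumably expected to have verified the surrounding buffer before
 * getting here.
 */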

static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
						      drm_file_t *filp_priv,
						      int id,
						      u32 *data ) {
	switch ( id ) {

	case RADEON_EMIT_PP_MISC:
		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
							 &data[( RADEON_RB3D_DEPTHOFFSET
							       - RADEON_PP_MISC ) / 4] ) ) {
			DRM_ERROR( "Invalid depth buffer offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case RADEON_EMIT_PP_CNTL:
		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
							 &data[( RADEON_RB3D_COLOROFFSET
							       - RADEON_PP_CNTL ) / 4] ) ) {
			DRM_ERROR( "Invalid colour buffer offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case R200_EMIT_PP_TXOFFSET_0:
	case R200_EMIT_PP_TXOFFSET_1:
	case R200_EMIT_PP_TXOFFSET_2:
	case R200_EMIT_PP_TXOFFSET_3:
	case R200_EMIT_PP_TXOFFSET_4:
	case R200_EMIT_PP_TXOFFSET_5:
		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
							 &data[0] ) ) {
			DRM_ERROR( "Invalid R200 texture offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case RADEON_EMIT_PP_TXFILTER_0:
	case RADEON_EMIT_PP_TXFILTER_1:
	case RADEON_EMIT_PP_TXFILTER_2:
		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
							 &data[( RADEON_PP_TXOFFSET_0
							       - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
			DRM_ERROR( "Invalid R100 texture offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case R200_EMIT_PP_CUBIC_OFFSETS_0:
	case R200_EMIT_PP_CUBIC_OFFSETS_1:
	case R200_EMIT_PP_CUBIC_OFFSETS_2:
	case R200_EMIT_PP_CUBIC_OFFSETS_3:
	case R200_EMIT_PP_CUBIC_OFFSETS_4:
	case R200_EMIT_PP_CUBIC_OFFSETS_5: {
		int i;
		for ( i = 0; i < 5; i++ ) {
			if ( radeon_check_and_fixup_offset_user( dev_priv,
								 filp_priv,
								 &data[i] ) ) {
				DRM_ERROR( "Invalid R200 cubic texture offset\n" );
				return DRM_ERR( EINVAL );
			}
		}
		break;
	}

	case RADEON_EMIT_RB3D_COLORPITCH:
	case RADEON_EMIT_RE_LINE_PATTERN:
	case RADEON_EMIT_SE_LINE_WIDTH:
	case RADEON_EMIT_PP_LUM_MATRIX:
	case RADEON_EMIT_PP_ROT_MATRIX_0:
	case RADEON_EMIT_RB3D_STENCILREFMASK:
	case RADEON_EMIT_SE_VPORT_XSCALE:
	case RADEON_EMIT_SE_CNTL:
	case RADEON_EMIT_SE_CNTL_STATUS:
	case RADEON_EMIT_RE_MISC:
	case RADEON_EMIT_PP_BORDER_COLOR_0:
	case RADEON_EMIT_PP_BORDER_COLOR_1:
	case RADEON_EMIT_PP_BORDER_COLOR_2:
	case RADEON_EMIT_SE_ZBIAS_FACTOR:
	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
	case R200_EMIT_PP_TXCBLEND_0:
	case R200_EMIT_PP_TXCBLEND_1:
	case R200_EMIT_PP_TXCBLEND_2:
	case R200_EMIT_PP_TXCBLEND_3:
	case R200_EMIT_PP_TXCBLEND_4:
	case R200_EMIT_PP_TXCBLEND_5:
	case R200_EMIT_PP_TXCBLEND_6:
	case R200_EMIT_PP_TXCBLEND_7:
	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
	case R200_EMIT_TFACTOR_0:
	case R200_EMIT_VTX_FMT_0:
	case R200_EMIT_VAP_CTL:
	case R200_EMIT_MATRIX_SELECT_0:
	case R200_EMIT_TEX_PROC_CTL_2:
	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
	case R200_EMIT_PP_TXFILTER_0:
	case R200_EMIT_PP_TXFILTER_1:
	case R200_EMIT_PP_TXFILTER_2:
	case R200_EMIT_PP_TXFILTER_3:
	case R200_EMIT_PP_TXFILTER_4:
	case R200_EMIT_PP_TXFILTER_5:
	case R200_EMIT_VTE_CNTL:
	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
	case R200_EMIT_PP_TAM_DEBUG3:
	case R200_EMIT_PP_CNTL_X:
	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
	case R200_EMIT_RE_SCISSOR_TL_0:
	case R200_EMIT_RE_SCISSOR_TL_1:
	case R200_EMIT_RE_SCISSOR_TL_2:
	case R200_EMIT_SE_VAP_CNTL_STATUS:
	case R200_EMIT_SE_VTX_STATE_CNTL:
	case R200_EMIT_RE_POINTSIZE:
	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
	case R200_EMIT_PP_CUBIC_FACES_0:
	case R200_EMIT_PP_CUBIC_FACES_1:
	case R200_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_CUBIC_FACES_3:
	case R200_EMIT_PP_CUBIC_FACES_4:
	case R200_EMIT_PP_CUBIC_FACES_5:
	case RADEON_EMIT_PP_TEX_SIZE_0:
	case RADEON_EMIT_PP_TEX_SIZE_1:
	case RADEON_EMIT_PP_TEX_SIZE_2:
	case R200_EMIT_RB3D_BLENDCOLOR:
		/* These packets don't contain memory offsets */
		break;

	default:
		DRM_ERROR( "Unknown state packet ID %d\n", id );
		return DRM_ERR( EINVAL );
	}

	return 0;
}

static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv,
						      drm_file_t *filp_priv,
						      drm_radeon_cmd_buffer_t *cmdbuf,
						      unsigned int *cmdsz ) {
	u32 tmp[4], *cmd = ( u32* )cmdbuf->buf;

	if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
		DRM_ERROR( "Failed to copy data from user space\n" );
		return DRM_ERR( EFAULT );
	}

	*cmdsz = 2 + ( ( tmp[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );

	if ( ( tmp[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
		DRM_ERROR( "Not a type 3 packet\n" );
		return DRM_ERR( EINVAL );
	}

	if ( 4 * *cmdsz > cmdbuf->bufsz ) {
		DRM_ERROR( "Packet size larger than size of data provided\n" );
		return DRM_ERR( EINVAL );
	}
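
	/* For reference, the header layout decoded above: bits [31:30]
	 * of the first dword give the packet type (0x3 for type 3),
	 * and bits [29:16] hold the body dword count minus two.  A
	 * header of 0xc0012d00, for example, would decode as a type-3
	 * packet of 2 + 1 = 3 dwords.  (Example value chosen purely
	 * for illustration.)
	 */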

	/* Check client state and fix it up if necessary */
	if ( tmp[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
		u32 offset;

		if ( tmp[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
			offset = tmp[2] << 10;
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
				DRM_ERROR( "Invalid first packet offset\n" );
				return DRM_ERR( EINVAL );
			}
			tmp[2] = ( tmp[2] & 0xffc00000 ) | offset >> 10;
		}

		if ( ( tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
		     ( tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
			offset = tmp[3] << 10;
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
				DRM_ERROR( "Invalid second packet offset\n" );
				return DRM_ERR( EINVAL );
			}
			tmp[3] = ( tmp[3] & 0xffc00000 ) | offset >> 10;
		}

		if ( DRM_COPY_TO_USER_UNCHECKED( cmd, tmp, sizeof( tmp ) ) ) {
			DRM_ERROR( "Failed to copy data to user space\n" );
			return DRM_ERR( EFAULT );
		}
	}

	return 0;
}


/* ================================================================
 * CP hardware state programming functions
 */

static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
					      drm_clip_rect_t *box )
{
	RING_LOCALS;

	DRM_DEBUG( "   box:  x1=%d y1=%d  x2=%d y2=%d\n",
		   box->x1, box->y1, box->x2, box->y2 );

	BEGIN_RING( 4 );
	OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
	OUT_RING( (box->y1 << 16) | box->x1 );
	OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
	OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
	ADVANCE_RING();
}
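
/* The -1 adjustments above suggest RADEON_RE_WIDTH_HEIGHT takes an
 * inclusive bottom-right corner, whereas the DRM clip rects use an
 * exclusive x2/y2 convention.
 */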

/* Emit 1.1 state
 */
static int radeon_emit_state( drm_radeon_private_t *dev_priv,
			      drm_file_t *filp_priv,
			      drm_radeon_context_regs_t *ctx,
			      drm_radeon_texture_regs_t *tex,
			      unsigned int dirty )
{
	RING_LOCALS;
	DRM_DEBUG( "dirty=0x%08x\n", dirty );

	if ( dirty & RADEON_UPLOAD_CONTEXT ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &ctx->rb3d_depthoffset ) ) {
			DRM_ERROR( "Invalid depth buffer offset\n" );
			return DRM_ERR( EINVAL );
		}

		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &ctx->rb3d_coloroffset ) ) {
			DRM_ERROR( "Invalid colour buffer offset\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 14 );
		OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
		OUT_RING( ctx->pp_misc );
		OUT_RING( ctx->pp_fog_color );
		OUT_RING( ctx->re_solid_color );
		OUT_RING( ctx->rb3d_blendcntl );
		OUT_RING( ctx->rb3d_depthoffset );
		OUT_RING( ctx->rb3d_depthpitch );
		OUT_RING( ctx->rb3d_zstencilcntl );
		OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
		OUT_RING( ctx->pp_cntl );
		OUT_RING( ctx->rb3d_cntl );
		OUT_RING( ctx->rb3d_coloroffset );
		OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
		OUT_RING( ctx->rb3d_colorpitch );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_VERTFMT ) {
		BEGIN_RING( 2 );
		OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
		OUT_RING( ctx->se_coord_fmt );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_LINE ) {
		BEGIN_RING( 5 );
		OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
		OUT_RING( ctx->re_line_pattern );
		OUT_RING( ctx->re_line_state );
		OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
		OUT_RING( ctx->se_line_width );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
		BEGIN_RING( 5 );
		OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
		OUT_RING( ctx->pp_lum_matrix );
		OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
		OUT_RING( ctx->pp_rot_matrix_0 );
		OUT_RING( ctx->pp_rot_matrix_1 );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_MASKS ) {
		BEGIN_RING( 4 );
		OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
		OUT_RING( ctx->rb3d_stencilrefmask );
		OUT_RING( ctx->rb3d_ropcntl );
		OUT_RING( ctx->rb3d_planemask );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
		BEGIN_RING( 7 );
		OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
		OUT_RING( ctx->se_vport_xscale );
		OUT_RING( ctx->se_vport_xoffset );
		OUT_RING( ctx->se_vport_yscale );
		OUT_RING( ctx->se_vport_yoffset );
		OUT_RING( ctx->se_vport_zscale );
		OUT_RING( ctx->se_vport_zoffset );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_SETUP ) {
		BEGIN_RING( 4 );
		OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
		OUT_RING( ctx->se_cntl );
		OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
		OUT_RING( ctx->se_cntl_status );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_MISC ) {
		BEGIN_RING( 2 );
		OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
		OUT_RING( ctx->re_misc );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX0 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[0].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 0\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
		OUT_RING( tex[0].pp_txfilter );
		OUT_RING( tex[0].pp_txformat );
		OUT_RING( tex[0].pp_txoffset );
		OUT_RING( tex[0].pp_txcblend );
		OUT_RING( tex[0].pp_txablend );
		OUT_RING( tex[0].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
		OUT_RING( tex[0].pp_border_color );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX1 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[1].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 1\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
		OUT_RING( tex[1].pp_txfilter );
		OUT_RING( tex[1].pp_txformat );
		OUT_RING( tex[1].pp_txoffset );
		OUT_RING( tex[1].pp_txcblend );
		OUT_RING( tex[1].pp_txablend );
		OUT_RING( tex[1].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
		OUT_RING( tex[1].pp_border_color );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX2 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[2].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 2\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
		OUT_RING( tex[2].pp_txfilter );
		OUT_RING( tex[2].pp_txformat );
		OUT_RING( tex[2].pp_txoffset );
		OUT_RING( tex[2].pp_txcblend );
		OUT_RING( tex[2].pp_txablend );
		OUT_RING( tex[2].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
		OUT_RING( tex[2].pp_border_color );
		ADVANCE_RING();
	}

	return 0;
}

/* Emit 1.2 state
 */
static int radeon_emit_state2( drm_radeon_private_t *dev_priv,
			       drm_file_t *filp_priv,
			       drm_radeon_state_t *state )
{
	RING_LOCALS;

	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
		BEGIN_RING( 3 );
		OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
		OUT_RING( state->context2.se_zbias_factor );
		OUT_RING( state->context2.se_zbias_constant );
		ADVANCE_RING();
	}

	return radeon_emit_state( dev_priv, filp_priv, &state->context,
				  state->tex, state->dirty );
}

/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
 * 1.3 cmdbuffers allow all previous state to be updated as well as
 * the tcl scalar and vector areas.
 */
static struct {
	int start;
	int len;
	const char *name;
} packet[RADEON_MAX_STATE_PACKETS] = {
	{ RADEON_PP_MISC, 7, "RADEON_PP_MISC" },
	{ RADEON_PP_CNTL, 3, "RADEON_PP_CNTL" },
	{ RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH" },
	{ RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN" },
	{ RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH" },
	{ RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX" },
	{ RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0" },
	{ RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK" },
	{ RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE" },
	{ RADEON_SE_CNTL, 2, "RADEON_SE_CNTL" },
	{ RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS" },
	{ RADEON_RE_MISC, 1, "RADEON_RE_MISC" },
	{ RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0" },
	{ RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0" },
	{ RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1" },
	{ RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1" },
	{ RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2" },
	{ RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2" },
	{ RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR" },
	{ RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT" },
	{ RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
	{ R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
	{ R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
	{ R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
	{ R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
	{ R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
	{ R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
	{ R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
	{ R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
	{ R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
	{ R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
	{ R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
	{ R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
	{ R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
	{ R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
	{ R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
	{ R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
	{ R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
	{ R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
	{ R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
	{ R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
	{ R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
	{ R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
	{ R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
	{ R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
	{ R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
	{ R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
	{ R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
	{ R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
	{ R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
	{ R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
	{ R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" },
	{ R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" },
	{ R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" },
	{ R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" },
	{ R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" },
	{ R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" },
	{ R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" },
	{ R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" },
	{ R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" },
	{ R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
	{ R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
	{ R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
	{ R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
	{ R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
	{ R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
	{ R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
	{ R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
	{ R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
	{ R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
	{ R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
	{ R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
	{ R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
	{ RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
	{ RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
	{ RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
	{ R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
};
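
/* A sketch of how this table might be consumed (hypothetical helper,
 * not the actual 1.3 dispatch path, which lives elsewhere): the state
 * packet ID indexes the table, packet[id].start names the first
 * register and packet[id].len the number of dwords that follow, so a
 * type-0 register write could be emitted as:
 *
 *	OUT_RING( CP_PACKET0( packet[id].start, packet[id].len - 1 ) );
 *	for ( i = 0 ; i < packet[id].len ; i++ )
 *		OUT_RING( data[i] );
 *
 * CP_PACKET0's count argument is the number of registers minus one,
 * matching its use throughout this file.
 */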


/* ================================================================
 * Performance monitoring functions
 */

static void radeon_clear_box( drm_radeon_private_t *dev_priv,
			      int x, int y, int w, int h,
			      int r, int g, int b )
{
	u32 color;
	RING_LOCALS;

	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	switch ( dev_priv->color_fmt ) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}
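
	/* Worked example for the RGB565 case: r = 255, g = 0, b = 255
	 * packs to (0xf8 << 8) | (0xf8 >> 3) = 0xf81f, the purple used
	 * for the page-flip box below.
	 */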

	BEGIN_RING( 4 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
	OUT_RING( 0xffffffff );
	ADVANCE_RING();

	BEGIN_RING( 6 );

	OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
	OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		  RADEON_GMC_BRUSH_SOLID_COLOR |
		  (dev_priv->color_fmt << 8) |
		  RADEON_GMC_SRC_DATATYPE_COLOR |
		  RADEON_ROP3_P |
		  RADEON_GMC_CLR_CMP_CNTL_DIS );

	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		OUT_RING( dev_priv->front_pitch_offset );
	} else {
		OUT_RING( dev_priv->back_pitch_offset );
	}

	OUT_RING( color );

	OUT_RING( (x << 16) | y );
	OUT_RING( (w << 16) | h );

	ADVANCE_RING();
}

static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
{
	/* Collapse various things into a wait flag -- trying to
	 * guess if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	if (dev_priv->stats.freelist_loops) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	/* Purple box for page flipping
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_FLIP )
		radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );

	/* Red box if we have to wait for idle at any point
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE )
		radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );

	/* Blue box: lost context?
	 */

	/* Yellow box for texture swaps
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD )
		radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) )
		radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );


	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box( dev_priv, 4, 16,
				  dev_priv->stats.requested_bufs, 4,
				  196, 128, 128 );
	}

	memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );

}

/* ================================================================
 * CP command dispatch functions
 */

static void radeon_cp_dispatch_clear( drm_device_t *dev,
				      drm_radeon_clear_t *clear,
				      drm_radeon_clear_rect_t *depth_boxes )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "flags = 0x%x\n", flags );

	dev_priv->stats.clears++;

	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
		if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
	}

	if ( flags & (RADEON_FRONT | RADEON_BACK) ) {

		BEGIN_RING( 4 );

		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
		OUT_RING( clear->color_mask );

		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
				   x, y, w, h, flags );

			if ( flags & RADEON_FRONT ) {
				BEGIN_RING( 6 );

				OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					  RADEON_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->color_fmt << 8) |
					  RADEON_GMC_SRC_DATATYPE_COLOR |
					  RADEON_ROP3_P |
					  RADEON_GMC_CLR_CMP_CNTL_DIS );

				OUT_RING( dev_priv->front_pitch_offset );
				OUT_RING( clear->clear_color );

				OUT_RING( (x << 16) | y );
				OUT_RING( (w << 16) | h );

				ADVANCE_RING();
			}

			if ( flags & RADEON_BACK ) {
				BEGIN_RING( 6 );

				OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					  RADEON_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->color_fmt << 8) |
					  RADEON_GMC_SRC_DATATYPE_COLOR |
					  RADEON_ROP3_P |
					  RADEON_GMC_CLR_CMP_CNTL_DIS );

				OUT_RING( dev_priv->back_pitch_offset );
				OUT_RING( clear->clear_color );

				OUT_RING( (x << 16) | y );
				OUT_RING( (w << 16) | h );

				ADVANCE_RING();
			}
		}
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	if ( dev_priv->is_r200 &&
	     (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {

		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;
		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;
		tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;



		/* Disable TCL */

		tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
				   (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
			SE_VTE_CNTL__VTX_XY_FMT_MASK |
			SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W) */
		tempSE_VTX_FMT_0 =
			SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
			SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;


		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if ( flags & RADEON_STENCIL ) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		BEGIN_RING( 26 );
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL );
		OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL );
		OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL );
		OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
			      tempRB3D_ZSTENCILCNTL );
		OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
			      tempRB3D_STENCILREFMASK );
		OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK );
		OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL );
		OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL );
		OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 );
		OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 );
		OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL );
		OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL,
			      tempRE_AUX_SCISSOR_CNTL );
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

			BEGIN_RING( 14 );
			OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) );
			OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
				   RADEON_PRIM_WALK_RING |
				   (3 << RADEON_NUM_VERTICES_SHIFT)) );
			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			ADVANCE_RING();
		}
	}
	else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {

		rb3d_cntl = depth_clear->rb3d_cntl;

		if ( flags & RADEON_DEPTH ) {
			rb3d_cntl |=  RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if ( flags & RADEON_STENCIL ) {
			rb3d_cntl |=  RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		BEGIN_RING( 13 );
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
		OUT_RING( 0x00000000 );
		OUT_RING( rb3d_cntl );

		OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
			      depth_clear->rb3d_zstencilcntl );
		OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
			      rb3d_stencilrefmask );
		OUT_RING_REG( RADEON_RB3D_PLANEMASK,
			      0x00000000 );
		OUT_RING_REG( RADEON_SE_CNTL,
			      depth_clear->se_cntl );
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

			BEGIN_RING( 15 );

			OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) );
			OUT_RING( RADEON_VTX_Z_PRESENT |
				  RADEON_VTX_PKCOLOR_PRESENT );
			OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
				   RADEON_PRIM_WALK_RING |
				   RADEON_MAOS_ENABLE |
				   RADEON_VTX_FMT_RADEON_MODE |
				   (3 << RADEON_NUM_VERTICES_SHIFT)) );


			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			ADVANCE_RING();
		}
	}

	/* Increment the clear counter.  The client-side 3D driver must
	 * wait on this value before performing the clear ioctl.  We
	 * need this because the card's so damned fast...
	 */
	dev_priv->sarea_priv->last_clear++;

	BEGIN_RING( 4 );

	RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
}

static void radeon_cp_dispatch_swap( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes( dev_priv );


	/* Wait for the 3D stream to idle before dispatching the bitblt.
	 * This will prevent data corruption between the two streams.
	 */
	BEGIN_RING( 2 );

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
			   x, y, w, h );

		BEGIN_RING( 7 );

		OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			  RADEON_GMC_BRUSH_NONE |
			  (dev_priv->color_fmt << 8) |
			  RADEON_GMC_SRC_DATATYPE_COLOR |
			  RADEON_ROP3_S |
			  RADEON_DP_SRC_SOURCE_MEMORY |
			  RADEON_GMC_CLR_CMP_CNTL_DIS |
			  RADEON_GMC_WR_MSK_DIS );

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING( dev_priv->back_pitch_offset );
			OUT_RING( dev_priv->front_pitch_offset );
		}
		else {
			OUT_RING( dev_priv->front_pitch_offset );
			OUT_RING( dev_priv->back_pitch_offset );
		}

		OUT_RING( (x << 16) | y );
		OUT_RING( (x << 16) | y );
		OUT_RING( (w << 16) | h );

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING( 4 );

	RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();
}

static void radeon_cp_dispatch_flip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
	int offset = (dev_priv->current_page == 1)
		   ? dev_priv->front_offset : dev_priv->back_offset;
	RING_LOCALS;
	DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
		   __FUNCTION__,
		   dev_priv->current_page,
		   dev_priv->sarea_priv->pfCurrentPage);

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes) {
		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
		radeon_cp_performance_boxes( dev_priv );
	}

	/* Update the frame offsets for both CRTCs
	 */
	BEGIN_RING( 6 );

	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
					      + sarea->frame.x
					      * ( dev_priv->color_fmt - 2 ) ) & ~7 )
					  + offset );
	OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
					   + offset );

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
					      1 - dev_priv->current_page;

	BEGIN_RING( 2 );

	RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}

static int bad_prim_vertex_nr( int primitive, int nr )
{
	switch (primitive & RADEON_PRIM_TYPE_MASK) {
	case RADEON_PRIM_TYPE_NONE:
	case RADEON_PRIM_TYPE_POINT:
		return nr < 1;
	case RADEON_PRIM_TYPE_LINE:
		return (nr & 1) || nr == 0;
	case RADEON_PRIM_TYPE_LINE_STRIP:
		return nr < 2;
	case RADEON_PRIM_TYPE_TRI_LIST:
	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
	case RADEON_PRIM_TYPE_RECT_LIST:
		return nr % 3 || nr == 0;
	case RADEON_PRIM_TYPE_TRI_FAN:
	case RADEON_PRIM_TYPE_TRI_STRIP:
		return nr < 3;
	default:
		return 1;
	}
}
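
/* Sample evaluations of the checks above: a RADEON_PRIM_TYPE_TRI_LIST
 * with nr = 4 is rejected (4 % 3 != 0) while nr = 6 passes; a
 * LINE_STRIP needs at least two vertices; a LINE list must have an
 * even, non-zero count.
 */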


typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim;
	unsigned int numverts;
	unsigned int offset;
	unsigned int vc_format;
} drm_radeon_tcl_prim_t;

static void radeon_cp_dispatch_vertex( drm_device_t *dev,
				       drm_buf_t *buf,
				       drm_radeon_tcl_prim_t *prim )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
		DRM_ERROR( "bad prim %x numverts %d\n",
			   prim->prim, prim->numverts );
		return;
	}

	do {
		/* Emit the next cliprect */
		if ( i < nbox ) {
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING( 5 );

		OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
		OUT_RING( offset );
		OUT_RING( numverts );
		OUT_RING( prim->vc_format );
		OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
			  RADEON_COLOR_ORDER_RGBA |
			  RADEON_VTX_FMT_RADEON_MODE |
			  (numverts << RADEON_NUM_VERTICES_SHIFT) );

		ADVANCE_RING();

		i++;
	} while ( i < nbox );
}



static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING( 2 );
	RADEON_DISPATCH_AGE( buf_priv->age );
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}

static void radeon_cp_dispatch_indirect( drm_device_t *dev,
					 drm_buf_t *buf,
					 int start, int end )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
		   buf->idx, start, end );

	if ( start != end ) {
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if ( dwords & 1 ) {
			u32 *data = (u32 *)
				((char *)dev_priv->buffers->handle
				 + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING( 3 );

		OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
		OUT_RING( offset );
		OUT_RING( dwords );

		ADVANCE_RING();
	}
}


static void radeon_cp_dispatch_indices( drm_device_t *dev,
					drm_buf_t *elt_buf,
					drm_radeon_tcl_prim_t *prim )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->offset,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, count )) {
		DRM_ERROR( "bad prim %x count %d\n",
			   prim->prim, count );
		return;
	}


	if ( start >= prim->finish ||
	     (prim->start & 0x7) ) {
		DRM_ERROR( "buffer prim %d\n", prim->prim );
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	data = (u32 *)((char *)dev_priv->buffers->handle +
		       elt_buf->offset + prim->start);

	data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT) );

	do {
		if ( i < nbox )
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

		radeon_cp_dispatch_indirect( dev, elt_buf,
					     prim->start,
					     prim->finish );

		i++;
	} while ( i < nbox );

}

#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
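
/* The 8 * sizeof(u32) reserved above corresponds to the 8-dword
 * CNTL_HOSTDATA_BLT header built in radeon_cp_dispatch_texture()
 * below (buffer[0] through buffer[7]), leaving the rest of the
 * indirect buffer for pixel data.
 */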

static int radeon_cp_dispatch_texture( DRMFILE filp,
				       drm_device_t *dev,
				       drm_radeon_texture_t *tex,
				       drm_radeon_tex_image_t *image )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_buf_t *buf;
	u32 format;
	u32 *buffer;
	const u8 *data;
	int size, dwords, tex_width, blit_width;
	u32 height;
	int i;
	RING_LOCALS;

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex->offset ) ) {
		DRM_ERROR( "Invalid destination offset\n" );
		return DRM_ERR( EINVAL );
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

#ifdef __BIG_ENDIAN
	/* The Mesa texture functions provide the data in little endian as the
	 * chip wants it, but we need to compensate for the fact that the CP
	 * ring gets byte-swapped
	 */
	BEGIN_RING( 2 );
	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
	ADVANCE_RING();
#endif


	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( tex->format ) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR( "invalid texture format %d\n", tex->format );
		return DRM_ERR(EINVAL);
	}

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );

	do {
		DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			   tex->offset >> 10, tex->pitch, tex->format,
			   image->x, image->y, image->width, image->height );

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 *)image->data;

		size = height * blit_width;

		if ( size > RADEON_MAX_TEXTURE_SIZE ) {
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if ( size < 4 && size > 0 ) {
			size = 4;
		} else if ( size == 0 ) {
			return 0;
		}

		buf = radeon_freelist_get( dev );
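		/* The retry below is intentionally disabled (note the
		 * "0 &&"); presumably idling the CP here was judged
		 * too expensive, so allocation failure falls through
		 * to the EAGAIN path instead.
		 */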
		if ( 0 && !buf ) {
			radeon_do_cp_idle( dev_priv );
			buf = radeon_freelist_get( dev );
		}
		if ( !buf ) {
			DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
			DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
			return DRM_ERR(EAGAIN);
		}


		/* Dispatch the indirect buffer.
		 */
		buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
		dwords = size / 4;
		buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
		buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			     RADEON_GMC_BRUSH_NONE |
			     (format << 8) |
			     RADEON_GMC_SRC_DATATYPE_COLOR |
			     RADEON_ROP3_S |
			     RADEON_DP_SRC_SOURCE_HOST_DATA |
			     RADEON_GMC_CLR_CMP_CNTL_DIS |
			     RADEON_GMC_WR_MSK_DIS);

		buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
		buffer[3] = 0xffffffff;
		buffer[4] = 0xffffffff;
		buffer[5] = (image->y << 16) | image->x;
		buffer[6] = (height << 16) | image->width;
		buffer[7] = dwords;
		buffer += 8;

		if ( tex_width >= 32 ) {
			/* Texture image width is larger than the minimum, so we
			 * can upload it directly.
			 */
			if ( DRM_COPY_FROM_USER( buffer, data,
						 dwords * sizeof(u32) ) ) {
				DRM_ERROR( "EFAULT on data, %d dwords\n",
					   dwords );
				return DRM_ERR(EFAULT);
			}
		} else {
			/* Texture image width is less than the minimum, so we
			 * need to pad out each image scanline to the minimum
			 * width.
			 */
			for ( i = 0 ; i < tex->height ; i++ ) {
				if ( DRM_COPY_FROM_USER( buffer, data,
							 tex_width ) ) {
					DRM_ERROR( "EFAULT on pad, %d bytes\n",
						   tex_width );
					return DRM_ERR(EFAULT);
				}
				buffer += 8;
				data += tex_width;
			}
		}

		buf->filp = filp;
		buf->used = (dwords + 8) * sizeof(u32);
		radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
		radeon_cp_discard_buffer( dev, buf );

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	return 0;
}


static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 35 );

	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}


/* ================================================================
 * IOCTL functions
 */

int radeon_cp_clear( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
				  sizeof(clear) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
				 sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
		return DRM_ERR(EFAULT);

	radeon_cp_dispatch_clear( dev, &clear, depth_boxes );

	COMMIT_RING();
	return 0;
}


/* Not sure why this isn't set all the time:
 */
static int radeon_do_init_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG( "\n" );

	BEGIN_RING( 6 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
	OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
	ADVANCE_RING();

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

/* Called whenever a client dies, from DRM(release).
 * NOTE: Lock isn't necessarily held when this is called!
 */
int radeon_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	if (dev_priv->current_page != 0)
		radeon_cp_dispatch_flip( dev );

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
int radeon_cp_flip( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip( dev );

	radeon_cp_dispatch_flip( dev );

	COMMIT_RING();
	return 0;
}

int radeon_cp_swap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap( dev );
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}

int radeon_cp_vertex( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex_t vertex;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}
	sarea_priv = dev_priv->sarea_priv;

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
				  sizeof(vertex) );

	DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
		   DRM_CURRENTPID,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return DRM_ERR(EINVAL);
	}

	/* Build up a prim_t record:
	 */
	if (vertex.count) {
		buf->used = vertex.count; /* not used? */

		if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
			if ( radeon_emit_state( dev_priv, filp_priv,
						&sarea_priv->context_state,
						sarea_priv->tex_state,
						sarea_priv->dirty ) ) {
				DRM_ERROR( "radeon_emit_state failed\n" );
				return DRM_ERR( EINVAL );
			}

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex.count; /* unused */
		prim.prim = vertex.prim;
		prim.numverts = vertex.count;
		prim.vc_format = dev_priv->sarea_priv->vc_format;

		radeon_cp_dispatch_vertex( dev, buf, &prim );
	}

	if (vertex.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}
1746
1747 int radeon_cp_indices( DRM_IOCTL_ARGS )
1748 {
1749 DRM_DEVICE;
1750 drm_radeon_private_t *dev_priv = dev->dev_private;
1751 drm_file_t *filp_priv;
1752 drm_radeon_sarea_t *sarea_priv;
1753 drm_device_dma_t *dma = dev->dma;
1754 drm_buf_t *buf;
1755 drm_radeon_indices_t elts;
1756 drm_radeon_tcl_prim_t prim;
1757 int count;
1758
1759 LOCK_TEST_WITH_RETURN( dev, filp );
1760
1761 if ( !dev_priv ) {
1762 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1763 return DRM_ERR(EINVAL);
1764 }
1765 sarea_priv = dev_priv->sarea_priv;
1766
1767 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1768
1769 DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
1770 sizeof(elts) );
1771
1772 DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
1773 DRM_CURRENTPID,
1774 elts.idx, elts.start, elts.end, elts.discard );
1775
1776 if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
1777 DRM_ERROR( "buffer index %d (of %d max)\n",
1778 elts.idx, dma->buf_count - 1 );
1779 return DRM_ERR(EINVAL);
1780 }
1781 if ( elts.prim < 0 ||
1782 elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1783 DRM_ERROR( "buffer prim %d\n", elts.prim );
1784 return DRM_ERR(EINVAL);
1785 }
1786
1787 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1788 VB_AGE_TEST_WITH_RETURN( dev_priv );
1789
1790 buf = dma->buflist[elts.idx];
1791
1792 if ( buf->filp != filp ) {
1793 DRM_ERROR( "process %d using buffer owned by %p\n",
1794 DRM_CURRENTPID, buf->filp );
1795 return DRM_ERR(EINVAL);
1796 }
1797 if ( buf->pending ) {
1798 DRM_ERROR( "sending pending buffer %d\n", elts.idx );
1799 return DRM_ERR(EINVAL);
1800 }
1801
1802 count = (elts.end - elts.start) / sizeof(u16);
1803 elts.start -= RADEON_INDEX_PRIM_OFFSET;
1804
1805 if ( elts.start & 0x7 ) {
1806 DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
1807 return DRM_ERR(EINVAL);
1808 }
1809 if ( elts.start < buf->used ) {
1810 DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
1811 return DRM_ERR(EINVAL);
1812 }
1813
1814 buf->used = elts.end;
1815
1816 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1817 if ( radeon_emit_state( dev_priv, filp_priv,
1818 &sarea_priv->context_state,
1819 sarea_priv->tex_state,
1820 sarea_priv->dirty ) ) {
1821 DRM_ERROR( "radeon_emit_state failed\n" );
1822 return DRM_ERR( EINVAL );
1823 }
1824
1825 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1826 RADEON_UPLOAD_TEX1IMAGES |
1827 RADEON_UPLOAD_TEX2IMAGES |
1828 RADEON_REQUIRE_QUIESCENCE);
1829 }
1830
1831
1832 /* Build up a prim_t record:
1833 */
1834 prim.start = elts.start;
1835 prim.finish = elts.end;
1836 prim.prim = elts.prim;
1837 prim.offset = 0; /* offset from start of dma buffers */
1838 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
1839 prim.vc_format = dev_priv->sarea_priv->vc_format;
1840
1841 radeon_cp_dispatch_indices( dev, buf, &prim );
1842 if (elts.discard) {
1843 radeon_cp_discard_buffer( dev, buf );
1844 }
1845
1846 COMMIT_RING();
1847 return 0;
1848 }
1849
1850 int radeon_cp_texture( DRM_IOCTL_ARGS )
1851 {
1852 DRM_DEVICE;
1853 drm_radeon_private_t *dev_priv = dev->dev_private;
1854 drm_radeon_texture_t tex;
1855 drm_radeon_tex_image_t image;
1856 int ret;
1857
1858 LOCK_TEST_WITH_RETURN( dev, filp );
1859
1860 DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
1861
1862 if ( tex.image == NULL ) {
1863 DRM_ERROR( "null texture image!\n" );
1864 return DRM_ERR(EINVAL);
1865 }
1866
1867 if ( DRM_COPY_FROM_USER( &image,
1868 (drm_radeon_tex_image_t *)tex.image,
1869 sizeof(image) ) )
1870 return DRM_ERR(EFAULT);
1871
1872 RING_SPACE_TEST_WITH_RETURN( dev_priv );
1873 VB_AGE_TEST_WITH_RETURN( dev_priv );
1874
1875 ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
1876
1877 COMMIT_RING();
1878 return ret;
1879 }
1880
int radeon_cp_stipple( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
				  sizeof(stipple) );

	if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	radeon_cp_dispatch_stipple( dev, mask );

	COMMIT_RING();
	return 0;
}

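/* Queue a client-built indirect buffer for execution.  The buffer
 * contents are not verified here, which is why the dispatch below is
 * only available to privileged clients.
 */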
int radeon_cp_indirect( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
				  sizeof(indirect) );

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return DRM_ERR(EINVAL);
	}

	if ( indirect.start < buf->used ) {
		DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
			   indirect.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf->used = indirect.end;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING( 2 );

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
	if (indirect.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}

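/* Newer vertex submission path: one buffer may carry several
 * primitives, each referencing a state block by index.  State is
 * only re-emitted when the index changes from one primitive to the
 * next.
 */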
int radeon_cp_vertex2( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex2_t vertex;
	int i;
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}
	sarea_priv = dev_priv->sarea_priv;

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
				  sizeof(vertex) );

	DRM_DEBUG( "pid=%d index=%d discard=%d\n",
		   DRM_CURRENTPID,
		   vertex.idx, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}

	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return DRM_ERR(EINVAL);
	}

	/* The SAREA is mapped writable by clients, so sanity check the
	 * cliprect count before using it.
	 */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return DRM_ERR(EINVAL);

	for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
			return DRM_ERR(EFAULT);

		if ( prim.stateidx != laststate ) {
			drm_radeon_state_t state;

			if ( DRM_COPY_FROM_USER( &state,
						 &vertex.state[prim.stateidx],
						 sizeof(state) ) )
				return DRM_ERR(EFAULT);

			if ( radeon_emit_state2( dev_priv, filp_priv, &state ) ) {
				DRM_ERROR( "radeon_emit_state2 failed\n" );
				return DRM_ERR( EINVAL );
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		if ( prim.prim & RADEON_PRIM_WALK_IND ) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* real count unknown for indexed prims */

			radeon_cp_dispatch_indices( dev, buf, &tclprim );
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex( dev, buf, &tclprim );
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if ( vertex.discard ) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}

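/* Emit a type-0 register write from the command stream.  The
 * register range and length are taken from the packet[] table,
 * indexed by the packet id in the header; any embedded buffer
 * offsets are verified and fixed up first.
 */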
static int radeon_emit_packets(
	drm_radeon_private_t *dev_priv,
	drm_file_t *filp_priv,
	drm_radeon_cmd_header_t header,
	drm_radeon_cmd_buffer_t *cmdbuf )
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	int *data = (int *)cmdbuf->buf;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return DRM_ERR(EINVAL);

	sz = packet[id].len;
	reg = packet[id].start;

	if (sz * sizeof(int) > cmdbuf->bufsz) {
		DRM_ERROR( "Packet size larger than data provided\n" );
		return DRM_ERR(EINVAL);
	}

	if ( radeon_check_and_fixup_packets( dev_priv, filp_priv, id, data ) ) {
		DRM_ERROR( "Packet verification failed\n" );
		return DRM_ERR( EINVAL );
	}

	BEGIN_RING(sz+1);
	OUT_RING( CP_PACKET0( reg, (sz-1) ) );
	OUT_RING_USER_TABLE( data, sz );
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

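/* Load TCL scalar state: program the start index and stride through
 * the scalar index register, then stream the data words out through
 * the scalar data register.
 */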
static __inline__ int radeon_emit_scalars(
	drm_radeon_private_t *dev_priv,
	drm_radeon_cmd_header_t header,
	drm_radeon_cmd_buffer_t *cmdbuf )
{
	int sz = header.scalars.count;
	int *data = (int *)cmdbuf->buf;
	int start = header.scalars.offset;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING( 3+sz );
	OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
	OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
	OUT_RING_USER_TABLE( data, sz );
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* God this is ugly: the scalar offset in the command header is only
 * a byte wide, so a second command type with an implicit 0x100 bias
 * is needed to reach the upper scalar registers.
 */
static __inline__ int radeon_emit_scalars2(
	drm_radeon_private_t *dev_priv,
	drm_radeon_cmd_header_t header,
	drm_radeon_cmd_buffer_t *cmdbuf )
{
	int sz = header.scalars.count;
	int *data = (int *)cmdbuf->buf;
	int start = ((unsigned int)header.scalars.offset) + 0x100;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING( 3+sz );
	OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
	OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
	OUT_RING_USER_TABLE( data, sz );
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

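/* Load TCL vector state, analogous to the scalar case but using the
 * vector index/data register pair and an octword stride.
 */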
static __inline__ int radeon_emit_vectors(
	drm_radeon_private_t *dev_priv,
	drm_radeon_cmd_header_t header,
	drm_radeon_cmd_buffer_t *cmdbuf )
{
	int sz = header.vectors.count;
	int *data = (int *)cmdbuf->buf;
	int start = header.vectors.offset;
	int stride = header.vectors.stride;
	RING_LOCALS;

	BEGIN_RING( 3+sz );
	OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
	OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
	OUT_RING_USER_TABLE( data, sz );
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

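/* Emit a raw type-3 packet from the command stream, after
 * radeon_check_and_fixup_packet3() has verified it and fixed up any
 * embedded offsets.
 */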
static int radeon_emit_packet3( drm_device_t *dev,
				drm_file_t *filp_priv,
				drm_radeon_cmd_buffer_t *cmdbuf )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int cmdsz;
	int *cmd = (int *)cmdbuf->buf, ret;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
						     cmdbuf, &cmdsz ) ) ) {
		DRM_ERROR( "Packet verification failed\n" );
		return ret;
	}

	BEGIN_RING( cmdsz );
	OUT_RING_USER_TABLE( cmd, cmdsz );
	ADVANCE_RING();

	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}

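/* As radeon_emit_packet3(), but replay the packet once per cliprect,
 * emitting the clip rectangle state before each pass.
 */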
static int radeon_emit_packet3_cliprect( drm_device_t *dev,
					 drm_file_t *filp_priv,
					 drm_radeon_cmd_buffer_t *cmdbuf,
					 int orig_nbox )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	unsigned int cmdsz;
	int *cmd = (int *)cmdbuf->buf, ret;
	drm_clip_rect_t *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
						     cmdbuf, &cmdsz ) ) ) {
		DRM_ERROR( "Packet verification failed\n" );
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if ( i < cmdbuf->nbox ) {
			if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
				return DRM_ERR(EFAULT);
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect().  This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window.  Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row.  That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if ( i ) {
				BEGIN_RING( 2 );
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect( dev_priv, &box );
		}

		BEGIN_RING( cmdsz );
		OUT_RING_USER_TABLE( cmd, cmdsz );
		ADVANCE_RING();

	} while ( ++i < cmdbuf->nbox );
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

 out:
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}

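/* Emit an engine-idle wait on behalf of the client: 2D, 3D, or both.
 */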
static int radeon_emit_wait( drm_device_t *dev, int flags )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
	switch (flags) {
	case RADEON_WAIT_2D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_2D|RADEON_WAIT_3D:
		BEGIN_RING( 2 );
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	return 0;
}

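/* The main command buffer ioctl: walk a user space stream of command
 * headers and dispatch each one to the emit helper for its type.
 * Any verification failure aborts the remainder of the buffer.
 */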
int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf = NULL;
	int idx;
	drm_radeon_cmd_buffer_t cmdbuf;
	drm_radeon_cmd_header_t header;
	int orig_nbox;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
				  sizeof(cmdbuf) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
		return DRM_ERR(EFAULT);

	if (cmdbuf.nbox &&
	    DRM_VERIFYAREA_READ(cmdbuf.boxes,
				cmdbuf.nbox * sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	orig_nbox = cmdbuf.nbox;

	while ( cmdbuf.bufsz >= sizeof(header) ) {

		if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
			DRM_ERROR("__get_user %p\n", cmdbuf.buf);
			return DRM_ERR(EFAULT);
		}

		cmdbuf.buf += sizeof(header);
		cmdbuf.bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packets failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if ( idx < 0 || idx >= dma->buf_count ) {
				DRM_ERROR( "buffer index %d (of %d max)\n",
					   idx, dma->buf_count - 1 );
				return DRM_ERR(EINVAL);
			}

			buf = dma->buflist[idx];
			if ( buf->filp != filp || buf->pending ) {
				DRM_ERROR( "bad buffer %p %p %d\n",
					   buf->filp, filp, buf->pending);
				return DRM_ERR(EINVAL);
			}

			radeon_cp_discard_buffer( dev, buf );
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				return DRM_ERR(EINVAL);
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait( dev, header.wait.flags )) {
				DRM_ERROR("radeon_emit_wait failed\n");
				return DRM_ERR(EINVAL);
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf.buf - sizeof(header));
			return DRM_ERR(EINVAL);
		}
	}

	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;
}

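/* Query driver state (buffer offsets, scratch register values, IRQ
 * number and the like) and copy the result back to user space.
 */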
int radeon_cp_getparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t param;
	int value;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
				  sizeof(param) );

	DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

	switch( param.param ) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH( 0 );
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH( 1 );
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH( 2 );
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio_offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an int-sized variable.  According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem.  If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained.  --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_TO_USER( param.value, &value, sizeof(value) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}

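/* Set driver state on a per-file basis.  Currently the only
 * parameter is the client's idea of the framebuffer location, used
 * to compute the offset fixup delta for this file.
 */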
int radeon_cp_setparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_setparam_t sp;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR( EINVAL );
	}

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t* )data,
				  sizeof( sp ) );

	switch( sp.param ) {
	case RADEON_SETPARAM_FB_LOCATION:
		filp_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
		break;
	default:
		DRM_DEBUG( "Invalid parameter %d\n", sp.param );
		return DRM_ERR( EINVAL );
	}

	return 0;
}