FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/via_verifier.c


    1 /*-
    2  * Copyright 2004 The Unichrome Project. All Rights Reserved.
    3  * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
    4  *
    5  * Permission is hereby granted, free of charge, to any person obtaining a
    6  * copy of this software and associated documentation files (the "Software"),
    7  * to deal in the Software without restriction, including without limitation
    8  * the rights to use, copy, modify, merge, publish, distribute, sub license,
    9  * and/or sell copies of the Software, and to permit persons to whom the
   10  * Software is furnished to do so, subject to the following conditions:
   11  *
   12  * The above copyright notice and this permission notice (including the
   13  * next paragraph) shall be included in all copies or substantial portions
   14  * of the Software.
   15  *
   16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
   19  * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
   20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
   21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   22  * DEALINGS IN THE SOFTWARE.
   23  *
   24  * Author: Thomas Hellstrom 2004, 2005.
   25  * This code was written using docs obtained under NDA from VIA Inc.
   26  *
   27  * Don't run this code directly on an AGP buffer. Due to cache problems it will
   28  * be very slow.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD: releng/9.0/sys/dev/drm/via_verifier.c 203288 2010-01-31 14:30:39Z rnoland $");
   33 
   34 #include "dev/drm/via_3d_reg.h"
   35 #include "dev/drm/drmP.h"
   36 #include "dev/drm/drm.h"
   37 #include "dev/drm/via_drm.h"
   38 #include "dev/drm/via_verifier.h"
   39 #include "dev/drm/via_drv.h"
   40 
   41 typedef enum {
   42         state_command,
   43         state_header2,
   44         state_header1,
   45         state_vheader5,
   46         state_vheader6,
   47         state_error
   48 } verifier_state_t;
   49 
   50 typedef enum {
   51         no_check = 0,
   52         check_for_header2,
   53         check_for_header1,
   54         check_for_header2_err,
   55         check_for_header1_err,
   56         check_for_fire,
   57         check_z_buffer_addr0,
   58         check_z_buffer_addr1,
   59         check_z_buffer_addr_mode,
   60         check_destination_addr0,
   61         check_destination_addr1,
   62         check_destination_addr_mode,
   63         check_for_dummy,
   64         check_for_dd,
   65         check_texture_addr0,
   66         check_texture_addr1,
   67         check_texture_addr2,
   68         check_texture_addr3,
   69         check_texture_addr4,
   70         check_texture_addr5,
   71         check_texture_addr6,
   72         check_texture_addr7,
   73         check_texture_addr8,
   74         check_texture_addr_mode,
   75         check_for_vertex_count,
   76         check_number_texunits,
   77         forbidden_command
   78 } hazard_t;
   79 
   80 /*
   81  * Associates each hazard above with a possible multi-command
    82  * sequence. For example, an address that is split over multiple
    83  * commands must be checked at the first command that does not
    84  * include any part of the address.
   85  */
   86 
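       /*
        * Example: check_z_buffer_addr0 and check_z_buffer_addr1 both map to
        * z_address, so the Z-buffer address assembled from several commands
        * is only examined in finish_current_sequence(), which runs when the
        * first command outside the sequence is encountered.
        */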
   87 static drm_via_sequence_t seqs[] = {
   88         no_sequence,
   89         no_sequence,
   90         no_sequence,
   91         no_sequence,
   92         no_sequence,
   93         no_sequence,
   94         z_address,
   95         z_address,
   96         z_address,
   97         dest_address,
   98         dest_address,
   99         dest_address,
  100         no_sequence,
  101         no_sequence,
  102         tex_address,
  103         tex_address,
  104         tex_address,
  105         tex_address,
  106         tex_address,
  107         tex_address,
  108         tex_address,
  109         tex_address,
  110         tex_address,
  111         tex_address,
  112         no_sequence
  113 };
  114 
  115 typedef struct {
  116         unsigned int code;
  117         hazard_t hz;
  118 } hz_init_t;
  119 
  120 static hz_init_t init_table1[] = {
  121         {0xf2, check_for_header2_err},
  122         {0xf0, check_for_header1_err},
  123         {0xee, check_for_fire},
  124         {0xcc, check_for_dummy},
  125         {0xdd, check_for_dd},
  126         {0x00, no_check},
  127         {0x10, check_z_buffer_addr0},
  128         {0x11, check_z_buffer_addr1},
  129         {0x12, check_z_buffer_addr_mode},
  130         {0x13, no_check},
  131         {0x14, no_check},
  132         {0x15, no_check},
  133         {0x23, no_check},
  134         {0x24, no_check},
  135         {0x33, no_check},
  136         {0x34, no_check},
  137         {0x35, no_check},
  138         {0x36, no_check},
  139         {0x37, no_check},
  140         {0x38, no_check},
  141         {0x39, no_check},
  142         {0x3A, no_check},
  143         {0x3B, no_check},
  144         {0x3C, no_check},
  145         {0x3D, no_check},
  146         {0x3E, no_check},
  147         {0x40, check_destination_addr0},
  148         {0x41, check_destination_addr1},
  149         {0x42, check_destination_addr_mode},
  150         {0x43, no_check},
  151         {0x44, no_check},
  152         {0x50, no_check},
  153         {0x51, no_check},
  154         {0x52, no_check},
  155         {0x53, no_check},
  156         {0x54, no_check},
  157         {0x55, no_check},
  158         {0x56, no_check},
  159         {0x57, no_check},
  160         {0x58, no_check},
  161         {0x70, no_check},
  162         {0x71, no_check},
  163         {0x78, no_check},
  164         {0x79, no_check},
  165         {0x7A, no_check},
  166         {0x7B, no_check},
  167         {0x7C, no_check},
  168         {0x7D, check_for_vertex_count}
  169 };
  170 
  171 static hz_init_t init_table2[] = {
  172         {0xf2, check_for_header2_err},
  173         {0xf0, check_for_header1_err},
  174         {0xee, check_for_fire},
  175         {0xcc, check_for_dummy},
  176         {0x00, check_texture_addr0},
  177         {0x01, check_texture_addr0},
  178         {0x02, check_texture_addr0},
  179         {0x03, check_texture_addr0},
  180         {0x04, check_texture_addr0},
  181         {0x05, check_texture_addr0},
  182         {0x06, check_texture_addr0},
  183         {0x07, check_texture_addr0},
  184         {0x08, check_texture_addr0},
  185         {0x09, check_texture_addr0},
  186         {0x20, check_texture_addr1},
  187         {0x21, check_texture_addr1},
  188         {0x22, check_texture_addr1},
  189         {0x23, check_texture_addr4},
  190         {0x2B, check_texture_addr3},
  191         {0x2C, check_texture_addr3},
  192         {0x2D, check_texture_addr3},
  193         {0x2E, check_texture_addr3},
  194         {0x2F, check_texture_addr3},
  195         {0x30, check_texture_addr3},
  196         {0x31, check_texture_addr3},
  197         {0x32, check_texture_addr3},
  198         {0x33, check_texture_addr3},
  199         {0x34, check_texture_addr3},
  200         {0x4B, check_texture_addr5},
  201         {0x4C, check_texture_addr6},
  202         {0x51, check_texture_addr7},
  203         {0x52, check_texture_addr8},
  204         {0x77, check_texture_addr2},
  205         {0x78, no_check},
  206         {0x79, no_check},
  207         {0x7A, no_check},
  208         {0x7B, check_texture_addr_mode},
  209         {0x7C, no_check},
  210         {0x7D, no_check},
  211         {0x7E, no_check},
  212         {0x7F, no_check},
  213         {0x80, no_check},
  214         {0x81, no_check},
  215         {0x82, no_check},
  216         {0x83, no_check},
  217         {0x85, no_check},
  218         {0x86, no_check},
  219         {0x87, no_check},
  220         {0x88, no_check},
  221         {0x89, no_check},
  222         {0x8A, no_check},
  223         {0x90, no_check},
  224         {0x91, no_check},
  225         {0x92, no_check},
  226         {0x93, no_check}
  227 };
  228 
  229 static hz_init_t init_table3[] = {
  230         {0xf2, check_for_header2_err},
  231         {0xf0, check_for_header1_err},
  232         {0xcc, check_for_dummy},
  233         {0x00, check_number_texunits}
  234 };
  235 
  236 static hazard_t table1[256];
  237 static hazard_t table2[256];
  238 static hazard_t table3[256];
  239 
  240 static __inline__ int
  241 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
  242 {
  243         if ((buf_end - *buf) >= num_words) {
  244                 *buf += num_words;
  245                 return 0;
  246         }
  247         DRM_ERROR("Illegal termination of DMA command buffer\n");
  248         return 1;
  249 }
  250 
  251 /*
  252  * Partially stolen from drm_memory.h
  253  */
  254 
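       /*
        * Return the AGP map that fully contains [offset, offset + size), if
        * any. The per-state map_cache is tried first; otherwise the device
        * map list is searched for a non-restricted _DRM_AGP map and the
        * result is cached for the next lookup.
        */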
  255 static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
  256                                                     unsigned long offset,
  257                                                     unsigned long size,
  258                                                     struct drm_device * dev)
  259 {
  260         drm_local_map_t *map = seq->map_cache;
  261 
  262         if (map && map->offset <= offset
  263             && (offset + size) <= (map->offset + map->size)) {
  264                 return map;
  265         }
  266 
  267         TAILQ_FOREACH(map, &dev->maplist, link) {
  268                 if (map->offset <= offset
  269                     && (offset + size) <= (map->offset + map->size)
  270                     && !(map->flags & _DRM_RESTRICTED)
  271                     && (map->type == _DRM_AGP)) {
  272                         seq->map_cache = map;
  273                         return map;
  274                 }
  275         }
  276         return NULL;
  277 }
  278 
  279 /*
   280  * Require that all AGP texture levels reside in the same AGP map, which
   281  * should be mappable by the client. This is not a big restriction.
   282  * FIXME: to enforce this security policy strictly, drm_rmmap would
   283  * have to wait for DMA quiescence before removing an AGP map.
   284  * The via_drm_lookup_agp_map call in practice seems to take
   285  * very little CPU time.
  286  */
  287 
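       /*
        * Called when the first command outside an unfinished multi-command
        * sequence arrives. For texture sequences this computes the byte range
        * [lo, hi) spanned by the levels in use (level 0 uses a byte pitch when
        * the NPOT flag is set, otherwise the pitch is a log2 shift) and
        * requires that range to fall within a single client-mappable AGP map.
        */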
  288 static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
  289 {
  290         switch (cur_seq->unfinished) {
  291         case z_address:
  292                 DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
  293                 break;
  294         case dest_address:
  295                 DRM_DEBUG("Destination start address is 0x%x\n",
  296                           cur_seq->d_addr);
  297                 break;
  298         case tex_address:
  299                 if (cur_seq->agp_texture) {
  300                         unsigned start =
  301                             cur_seq->tex_level_lo[cur_seq->texture];
  302                         unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
  303                         unsigned long lo = ~0, hi = 0, tmp;
  304                         uint32_t *addr, *pitch, *height, tex;
  305                         unsigned i;
  306                         int npot;
  307 
  308                         if (end > 9)
  309                                 end = 9;
  310                         if (start > 9)
  311                                 start = 9;
  312 
  313                         addr =
  314                             &(cur_seq->t_addr[tex = cur_seq->texture][start]);
  315                         pitch = &(cur_seq->pitch[tex][start]);
  316                         height = &(cur_seq->height[tex][start]);
  317                         npot = cur_seq->tex_npot[tex];
  318                         for (i = start; i <= end; ++i) {
  319                                 tmp = *addr++;
  320                                 if (tmp < lo)
  321                                         lo = tmp;
  322                                 if (i == 0 && npot)
  323                                         tmp += (*height++ * *pitch++);
  324                                 else
  325                                         tmp += (*height++ << *pitch++);
  326                                 if (tmp > hi)
  327                                         hi = tmp;
  328                         }
  329 
  330                         if (!via_drm_lookup_agp_map
  331                             (cur_seq, lo, hi - lo, cur_seq->dev)) {
  332                                 DRM_ERROR
  333                                     ("AGP texture is not in allowed map\n");
  334                                 return 2;
  335                         }
  336                 }
  337                 break;
  338         default:
  339                 break;
  340         }
  341         cur_seq->unfinished = no_sequence;
  342         return 0;
  343 }
  344 
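       /*
        * Check a single command dword against its hazard class. Return value
        * convention: 0 means the command is accepted, 1 means the dword should
        * be handed back to the caller and re-examined as a new header or fire
        * command, and 2 means verification failed.
        */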
  345 static __inline__ int
  346 investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
  347 {
  348         register uint32_t tmp, *tmp_addr;
  349 
  350         if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
  351                 int ret;
  352                 if ((ret = finish_current_sequence(cur_seq)))
  353                         return ret;
  354         }
  355 
  356         switch (hz) {
  357         case check_for_header2:
  358                 if (cmd == HALCYON_HEADER2)
  359                         return 1;
  360                 return 0;
  361         case check_for_header1:
  362                 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  363                         return 1;
  364                 return 0;
  365         case check_for_header2_err:
  366                 if (cmd == HALCYON_HEADER2)
  367                         return 1;
  368                 DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
  369                 break;
  370         case check_for_header1_err:
  371                 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  372                         return 1;
  373                 DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
  374                 break;
  375         case check_for_fire:
  376                 if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
  377                         return 1;
  378                 DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
  379                 break;
  380         case check_for_dummy:
  381                 if (HC_DUMMY == cmd)
  382                         return 0;
  383                 DRM_ERROR("Illegal DMA HC_DUMMY command\n");
  384                 break;
  385         case check_for_dd:
  386                 if (0xdddddddd == cmd)
  387                         return 0;
  388                 DRM_ERROR("Illegal DMA 0xdddddddd command\n");
  389                 break;
  390         case check_z_buffer_addr0:
  391                 cur_seq->unfinished = z_address;
  392                 cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
  393                     (cmd & 0x00FFFFFF);
  394                 return 0;
  395         case check_z_buffer_addr1:
  396                 cur_seq->unfinished = z_address;
  397                 cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
  398                     ((cmd & 0xFF) << 24);
  399                 return 0;
  400         case check_z_buffer_addr_mode:
  401                 cur_seq->unfinished = z_address;
  402                 if ((cmd & 0x0000C000) == 0)
  403                         return 0;
  404                 DRM_ERROR("Attempt to place Z buffer in system memory\n");
  405                 return 2;
  406         case check_destination_addr0:
  407                 cur_seq->unfinished = dest_address;
  408                 cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
  409                     (cmd & 0x00FFFFFF);
  410                 return 0;
  411         case check_destination_addr1:
  412                 cur_seq->unfinished = dest_address;
  413                 cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
  414                     ((cmd & 0xFF) << 24);
  415                 return 0;
  416         case check_destination_addr_mode:
  417                 cur_seq->unfinished = dest_address;
  418                 if ((cmd & 0x0000C000) == 0)
  419                         return 0;
  420                 DRM_ERROR
  421                     ("Attempt to place 3D drawing buffer in system memory\n");
  422                 return 2;
  423         case check_texture_addr0:
  424                 cur_seq->unfinished = tex_address;
  425                 tmp = (cmd >> 24);
  426                 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
  427                 *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
  428                 return 0;
  429         case check_texture_addr1:
  430                 cur_seq->unfinished = tex_address;
  431                 tmp = ((cmd >> 24) - 0x20);
  432                 tmp += tmp << 1;
  433                 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
  434                 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
  435                 tmp_addr++;
  436                 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
  437                 tmp_addr++;
  438                 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
  439                 return 0;
  440         case check_texture_addr2:
  441                 cur_seq->unfinished = tex_address;
  442                 cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
  443                 cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
  444                 return 0;
  445         case check_texture_addr3:
  446                 cur_seq->unfinished = tex_address;
  447                 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
  448                 if (tmp == 0 &&
  449                     (cmd & HC_HTXnEnPit_MASK)) {
  450                         cur_seq->pitch[cur_seq->texture][tmp] =
  451                                 (cmd & HC_HTXnLnPit_MASK);
  452                         cur_seq->tex_npot[cur_seq->texture] = 1;
  453                 } else {
  454                         cur_seq->pitch[cur_seq->texture][tmp] =
  455                                 (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
  456                         cur_seq->tex_npot[cur_seq->texture] = 0;
  457                         if (cmd & 0x000FFFFF) {
  458                                 DRM_ERROR
  459                                         ("Unimplemented texture level 0 pitch mode.\n");
  460                                 return 2;
  461                         }
  462                 }
  463                 return 0;
  464         case check_texture_addr4:
  465                 cur_seq->unfinished = tex_address;
  466                 tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
  467                 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
  468                 return 0;
  469         case check_texture_addr5:
  470         case check_texture_addr6:
  471                 cur_seq->unfinished = tex_address;
  472                 /*
  473                  * Texture width. We don't care since we have the pitch.
  474                  */
  475                 return 0;
  476         case check_texture_addr7:
  477                 cur_seq->unfinished = tex_address;
  478                 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
  479                 tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
  480                 tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
  481                 tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
  482                 tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
  483                 tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
  484                 tmp_addr[0] = 1 << (cmd & 0x0000000F);
  485                 return 0;
  486         case check_texture_addr8:
  487                 cur_seq->unfinished = tex_address;
  488                 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
  489                 tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
  490                 tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
  491                 tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
  492                 tmp_addr[6] = 1 << (cmd & 0x0000000F);
  493                 return 0;
  494         case check_texture_addr_mode:
  495                 cur_seq->unfinished = tex_address;
  496                 if (2 == (tmp = cmd & 0x00000003)) {
  497                         DRM_ERROR
  498                             ("Attempt to fetch texture from system memory.\n");
  499                         return 2;
  500                 }
  501                 cur_seq->agp_texture = (tmp == 3);
  502                 cur_seq->tex_palette_size[cur_seq->texture] =
  503                     (cmd >> 16) & 0x000000007;
  504                 return 0;
  505         case check_for_vertex_count:
  506                 cur_seq->vertex_count = cmd & 0x0000FFFF;
  507                 return 0;
  508         case check_number_texunits:
  509                 cur_seq->multitex = (cmd >> 3) & 1;
  510                 return 0;
  511         default:
  512                 DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
  513                 return 2;
  514         }
  515         return 2;
  516 }
  517 
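       /*
        * Verify a HC_ParaType_CmdVdata primitive list: a B command selecting
        * the vertex layout, an A command that is combined with the fire bits,
        * then vertex data consumed dw_count dwords at a time until the fire
        * command is reached. Each fire location is recorded in
        * dev_priv->fire_offsets so that via_parse_header2() can replay the
        * list later.
        */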
  518 static __inline__ int
  519 via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
  520                     drm_via_state_t * cur_seq)
  521 {
  522         drm_via_private_t *dev_priv =
  523             (drm_via_private_t *) cur_seq->dev->dev_private;
  524         uint32_t a_fire, bcmd, dw_count;
  525         int ret = 0;
  526         int have_fire;
  527         const uint32_t *buf = *buffer;
  528 
  529         while (buf < buf_end) {
  530                 have_fire = 0;
  531                 if ((buf_end - buf) < 2) {
  532                         DRM_ERROR
  533                             ("Unexpected termination of primitive list.\n");
  534                         ret = 1;
  535                         break;
  536                 }
  537                 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
  538                         break;
  539                 bcmd = *buf++;
  540                 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
  541                         DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
  542                                   *buf);
  543                         ret = 1;
  544                         break;
  545                 }
  546                 a_fire =
  547                     *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
  548                     HC_HE3Fire_MASK;
  549 
  550                 /*
   551                  * How many dwords per vertex?
  552                  */
  553 
  554                 if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
  555                         DRM_ERROR("Illegal B command vertex data for AGP.\n");
  556                         ret = 1;
  557                         break;
  558                 }
  559 
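                       /*
                        * Each set bit in bits 7..14 of the B command enables one
                        * vertex component; the components selected by bits 7 and
                        * 8 take two dwords each when multitexturing is enabled,
                        * the remaining ones take one dword each.
                        */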
  560                 dw_count = 0;
  561                 if (bcmd & (1 << 7))
  562                         dw_count += (cur_seq->multitex) ? 2 : 1;
  563                 if (bcmd & (1 << 8))
  564                         dw_count += (cur_seq->multitex) ? 2 : 1;
  565                 if (bcmd & (1 << 9))
  566                         dw_count++;
  567                 if (bcmd & (1 << 10))
  568                         dw_count++;
  569                 if (bcmd & (1 << 11))
  570                         dw_count++;
  571                 if (bcmd & (1 << 12))
  572                         dw_count++;
  573                 if (bcmd & (1 << 13))
  574                         dw_count++;
  575                 if (bcmd & (1 << 14))
  576                         dw_count++;
  577 
  578                 while (buf < buf_end) {
  579                         if (*buf == a_fire) {
  580                                 if (dev_priv->num_fire_offsets >=
  581                                     VIA_FIRE_BUF_SIZE) {
  582                                         DRM_ERROR("Fire offset buffer full.\n");
  583                                         ret = 1;
  584                                         break;
  585                                 }
  586                                 dev_priv->fire_offsets[dev_priv->
  587                                                        num_fire_offsets++] =
  588                                     buf;
  589                                 have_fire = 1;
  590                                 buf++;
  591                                 if (buf < buf_end && *buf == a_fire)
  592                                         buf++;
  593                                 break;
  594                         }
  595                         if ((*buf == HALCYON_HEADER2) ||
  596                             ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
  597                                 DRM_ERROR("Missing Vertex Fire command, "
   598                                           "Stray Vertex Fire command or verifier "
  599                                           "lost sync.\n");
  600                                 ret = 1;
  601                                 break;
  602                         }
  603                         if ((ret = eat_words(&buf, buf_end, dw_count)))
  604                                 break;
  605                 }
  606                 if (buf >= buf_end && !have_fire) {
  607                         DRM_ERROR("Missing Vertex Fire command or verifier "
  608                                   "lost sync.\n");
  609                         ret = 1;
  610                         break;
  611                 }
  612                 if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
  613                         DRM_ERROR("AGP Primitive list end misaligned.\n");
  614                         ret = 1;
  615                         break;
  616                 }
  617         }
  618         *buffer = buf;
  619         return ret;
  620 }
  621 
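       /*
        * Verify a HALCYON_HEADER2 sequence. The second dword selects the
        * parameter type, which in turn selects one of the hazard tables (or a
        * special-case handler); each following command dword is then looked
        * up by its top byte and run through investigate_hazard().
        */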
  622 static __inline__ verifier_state_t
  623 via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
  624                   drm_via_state_t * hc_state)
  625 {
  626         uint32_t cmd;
  627         int hz_mode;
  628         hazard_t hz;
  629         const uint32_t *buf = *buffer;
  630         const hazard_t *hz_table;
  631 
  632         if ((buf_end - buf) < 2) {
  633                 DRM_ERROR
  634                     ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
  635                 return state_error;
  636         }
  637         buf++;
  638         cmd = (*buf++ & 0xFFFF0000) >> 16;
  639 
  640         switch (cmd) {
  641         case HC_ParaType_CmdVdata:
  642                 if (via_check_prim_list(&buf, buf_end, hc_state))
  643                         return state_error;
  644                 *buffer = buf;
  645                 return state_command;
  646         case HC_ParaType_NotTex:
  647                 hz_table = table1;
  648                 break;
  649         case HC_ParaType_Tex:
  650                 hc_state->texture = 0;
  651                 hz_table = table2;
  652                 break;
  653         case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
  654                 hc_state->texture = 1;
  655                 hz_table = table2;
  656                 break;
  657         case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
  658                 hz_table = table3;
  659                 break;
  660         case HC_ParaType_Auto:
  661                 if (eat_words(&buf, buf_end, 2))
  662                         return state_error;
  663                 *buffer = buf;
  664                 return state_command;
  665         case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
  666                 if (eat_words(&buf, buf_end, 32))
  667                         return state_error;
  668                 *buffer = buf;
  669                 return state_command;
  670         case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
  671         case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
  672                 DRM_ERROR("Texture palettes are rejected because of "
   673                           "lack of info on how to determine their size.\n");
  674                 return state_error;
  675         case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
  676                 DRM_ERROR("Fog factor palettes are rejected because of "
   677                           "lack of info on how to determine their size.\n");
  678                 return state_error;
  679         default:
  680 
  681                 /*
   682                  * There are some unimplemented HC_ParaTypes here that
  683                  * need to be implemented if the Mesa driver is extended.
  684                  */
  685 
  686                 DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
  687                           "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
  688                           cmd, *(buf - 2));
  689                 *buffer = buf;
  690                 return state_error;
  691         }
  692 
  693         while (buf < buf_end) {
  694                 cmd = *buf++;
  695                 if ((hz = hz_table[cmd >> 24])) {
  696                         if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
  697                                 if (hz_mode == 1) {
  698                                         buf--;
  699                                         break;
  700                                 }
  701                                 return state_error;
  702                         }
  703                 } else if (hc_state->unfinished &&
  704                            finish_current_sequence(hc_state)) {
  705                         return state_error;
  706                 }
  707         }
  708         if (hc_state->unfinished && finish_current_sequence(hc_state)) {
  709                 return state_error;
  710         }
  711         *buffer = buf;
  712         return state_command;
  713 }
  714 
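       /*
        * Replay an already-verified HALCYON_HEADER2 sequence to the hardware
        * through MMIO burst writes, emitting vertex data up to the fire
        * offsets recorded by via_check_prim_list() during verification.
        */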
  715 static __inline__ verifier_state_t
  716 via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
  717                   const uint32_t * buf_end, int *fire_count)
  718 {
  719         uint32_t cmd;
  720         const uint32_t *buf = *buffer;
  721         const uint32_t *next_fire;
  722         int burst = 0;
  723 
  724         next_fire = dev_priv->fire_offsets[*fire_count];
  725         buf++;
  726         cmd = (*buf & 0xFFFF0000) >> 16;
  727         VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
  728         switch (cmd) {
  729         case HC_ParaType_CmdVdata:
  730                 while ((buf < buf_end) &&
  731                        (*fire_count < dev_priv->num_fire_offsets) &&
  732                        (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
  733                         while (buf <= next_fire) {
  734                                 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
  735                                           (burst & 63), *buf++);
  736                                 burst += 4;
  737                         }
  738                         if ((buf < buf_end)
  739                             && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
  740                                 buf++;
  741 
  742                         if (++(*fire_count) < dev_priv->num_fire_offsets)
  743                                 next_fire = dev_priv->fire_offsets[*fire_count];
  744                 }
  745                 break;
  746         default:
  747                 while (buf < buf_end) {
  748 
  749                         if (*buf == HC_HEADER2 ||
  750                             (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
  751                             (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
  752                             (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
  753                                 break;
  754 
  755                         VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
  756                                   (burst & 63), *buf++);
  757                         burst += 4;
  758                 }
  759         }
  760         *buffer = buf;
  761         return state_command;
  762 }
  763 
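       /*
        * Only allow video DMA to touch MMIO register offsets 0x000-0x3FF,
        * 0xC00-0xCFF and 0x1300-0x13FF; accesses to the 3D/command-burst
        * area, the PCI DMA area and the VGA registers are rejected.
        */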
  764 static __inline__ int verify_mmio_address(uint32_t address)
  765 {
  766         if ((address > 0x3FF) && (address < 0xC00)) {
  767                 DRM_ERROR("Invalid VIDEO DMA command. "
  768                           "Attempt to access 3D- or command burst area.\n");
  769                 return 1;
  770         } else if ((address > 0xCFF) && (address < 0x1300)) {
  771                 DRM_ERROR("Invalid VIDEO DMA command. "
  772                           "Attempt to access PCI DMA area.\n");
  773                 return 1;
  774         } else if (address > 0x13FF) {
  775                 DRM_ERROR("Invalid VIDEO DMA command. "
  776                           "Attempt to access VGA registers.\n");
  777                 return 1;
  778         }
  779         return 0;
  780 }
  781 
  782 static __inline__ int
  783 verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
  784                   uint32_t dwords)
  785 {
  786         const uint32_t *buf = *buffer;
  787 
  788         if (buf_end - buf < dwords) {
  789                 DRM_ERROR("Illegal termination of video command.\n");
  790                 return 1;
  791         }
  792         while (dwords--) {
  793                 if (*buf++) {
  794                         DRM_ERROR("Illegal video command tail.\n");
  795                         return 1;
  796                 }
  797         }
  798         *buffer = buf;
  799         return 0;
  800 }
  801 
  802 static __inline__ verifier_state_t
  803 via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
  804 {
  805         uint32_t cmd;
  806         const uint32_t *buf = *buffer;
  807         verifier_state_t ret = state_command;
  808 
  809         while (buf < buf_end) {
  810                 cmd = *buf;
  811                 if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
  812                     (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
  813                         if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  814                                 break;
  815                         DRM_ERROR("Invalid HALCYON_HEADER1 command. "
  816                                   "Attempt to access 3D- or command burst area.\n");
  817                         ret = state_error;
  818                         break;
  819                 } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
  820                         if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  821                                 break;
  822                         DRM_ERROR("Invalid HALCYON_HEADER1 command. "
  823                                   "Attempt to access VGA registers.\n");
  824                         ret = state_error;
  825                         break;
  826                 } else {
  827                         buf += 2;
  828                 }
  829         }
  830         *buffer = buf;
  831         return ret;
  832 }
  833 
  834 static __inline__ verifier_state_t
  835 via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
  836                   const uint32_t * buf_end)
  837 {
  838         register uint32_t cmd;
  839         const uint32_t *buf = *buffer;
  840 
  841         while (buf < buf_end) {
  842                 cmd = *buf;
  843                 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  844                         break;
  845                 VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
  846                 buf++;
  847         }
  848         *buffer = buf;
  849         return state_command;
  850 }
  851 
  852 static __inline__ verifier_state_t
  853 via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
  854 {
  855         uint32_t data;
  856         const uint32_t *buf = *buffer;
  857 
  858         if (buf_end - buf < 4) {
  859                 DRM_ERROR("Illegal termination of video header5 command\n");
  860                 return state_error;
  861         }
  862 
  863         data = *buf++ & ~VIA_VIDEOMASK;
  864         if (verify_mmio_address(data))
  865                 return state_error;
  866 
  867         data = *buf++;
  868         if (*buf++ != 0x00F50000) {
  869                 DRM_ERROR("Illegal header5 header data\n");
  870                 return state_error;
  871         }
  872         if (*buf++ != 0x00000000) {
  873                 DRM_ERROR("Illegal header5 header data\n");
  874                 return state_error;
  875         }
  876         if (eat_words(&buf, buf_end, data))
  877                 return state_error;
  878         if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
  879                 return state_error;
  880         *buffer = buf;
  881         return state_command;
  882 
  883 }
  884 
  885 static __inline__ verifier_state_t
  886 via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
  887                    const uint32_t * buf_end)
  888 {
  889         uint32_t addr, count, i;
  890         const uint32_t *buf = *buffer;
  891 
  892         addr = *buf++ & ~VIA_VIDEOMASK;
  893         i = count = *buf;
  894         buf += 3;
  895         while (i--) {
  896                 VIA_WRITE(addr, *buf++);
  897         }
  898         if (count & 3)
  899                 buf += 4 - (count & 3);
  900         *buffer = buf;
  901         return state_command;
  902 }
  903 
  904 static __inline__ verifier_state_t
  905 via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
  906 {
  907         uint32_t data;
  908         const uint32_t *buf = *buffer;
  909         uint32_t i;
  910 
  911         if (buf_end - buf < 4) {
  912                 DRM_ERROR("Illegal termination of video header6 command\n");
  913                 return state_error;
  914         }
  915         buf++;
  916         data = *buf++;
  917         if (*buf++ != 0x00F60000) {
  918                 DRM_ERROR("Illegal header6 header data\n");
  919                 return state_error;
  920         }
  921         if (*buf++ != 0x00000000) {
  922                 DRM_ERROR("Illegal header6 header data\n");
  923                 return state_error;
  924         }
  925         if ((buf_end - buf) < (data << 1)) {
  926                 DRM_ERROR("Illegal termination of video header6 command\n");
  927                 return state_error;
  928         }
  929         for (i = 0; i < data; ++i) {
  930                 if (verify_mmio_address(*buf++))
  931                         return state_error;
  932                 buf++;
  933         }
  934         data <<= 1;
  935         if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
  936                 return state_error;
  937         *buffer = buf;
  938         return state_command;
  939 }
  940 
  941 static __inline__ verifier_state_t
  942 via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
  943                    const uint32_t * buf_end)
  944 {
  945 
  946         uint32_t addr, count, i;
  947         const uint32_t *buf = *buffer;
  948 
  949         i = count = *++buf;
  950         buf += 3;
  951         while (i--) {
  952                 addr = *buf++;
  953                 VIA_WRITE(addr, *buf++);
  954         }
  955         count <<= 1;
  956         if (count & 3)
  957                 buf += 4 - (count & 3);
  958         *buffer = buf;
  959         return state_command;
  960 }
  961 
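       /*
        * Top-level verifier: walk the command buffer as a small state machine,
        * dispatching on the header dword to the HEADER1, HEADER2 and video
        * header checkers. Video headers are only accepted when cme_video is
        * set (VIA_PRO_GROUP_A and VIA_DX9_0), and 3D HEADER2 commands are
        * refused on VIA_DX9_0. On any error the saved hc_state is restored
        * and -EINVAL is returned.
        */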
  962 int
  963 via_verify_command_stream(const uint32_t * buf, unsigned int size,
  964                           struct drm_device * dev, int agp)
  965 {
  966 
  967         drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  968         drm_via_state_t *hc_state = &dev_priv->hc_state;
  969         drm_via_state_t saved_state = *hc_state;
  970         uint32_t cmd;
  971         const uint32_t *buf_end = buf + (size >> 2);
  972         verifier_state_t state = state_command;
  973         int cme_video;
  974         int supported_3d;
  975 
  976         cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
  977                      dev_priv->chipset == VIA_DX9_0);
  978 
  979         supported_3d = dev_priv->chipset != VIA_DX9_0;
  980 
  981         hc_state->dev = dev;
  982         hc_state->unfinished = no_sequence;
  983         hc_state->map_cache = NULL;
  984         hc_state->agp = agp;
  985         hc_state->buf_start = buf;
  986         dev_priv->num_fire_offsets = 0;
  987 
  988         while (buf < buf_end) {
  989 
  990                 switch (state) {
  991                 case state_header2:
  992                         state = via_check_header2(&buf, buf_end, hc_state);
  993                         break;
  994                 case state_header1:
  995                         state = via_check_header1(&buf, buf_end);
  996                         break;
  997                 case state_vheader5:
  998                         state = via_check_vheader5(&buf, buf_end);
  999                         break;
 1000                 case state_vheader6:
 1001                         state = via_check_vheader6(&buf, buf_end);
 1002                         break;
 1003                 case state_command:
 1004                         if ((HALCYON_HEADER2 == (cmd = *buf)) &&
 1005                             supported_3d)
 1006                                 state = state_header2;
 1007                         else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
 1008                                 state = state_header1;
 1009                         else if (cme_video
 1010                                  && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
 1011                                 state = state_vheader5;
 1012                         else if (cme_video
 1013                                  && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
 1014                                 state = state_vheader6;
 1015                         else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
 1016                                 DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
 1017                                 state = state_error;
 1018                         } else {
 1019                                 DRM_ERROR
 1020                                     ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
 1021                                      cmd);
 1022                                 state = state_error;
 1023                         }
 1024                         break;
 1025                 case state_error:
 1026                 default:
 1027                         *hc_state = saved_state;
 1028                         return -EINVAL;
 1029                 }
 1030         }
 1031         if (state == state_error) {
 1032                 *hc_state = saved_state;
 1033                 return -EINVAL;
 1034         }
 1035         return 0;
 1036 }
 1037 
 1038 int
 1039 via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
 1040                          unsigned int size)
 1041 {
 1042 
 1043         drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 1044         uint32_t cmd;
 1045         const uint32_t *buf_end = buf + (size >> 2);
 1046         verifier_state_t state = state_command;
 1047         int fire_count = 0;
 1048 
 1049         while (buf < buf_end) {
 1050 
 1051                 switch (state) {
 1052                 case state_header2:
 1053                         state =
 1054                             via_parse_header2(dev_priv, &buf, buf_end,
 1055                                               &fire_count);
 1056                         break;
 1057                 case state_header1:
 1058                         state = via_parse_header1(dev_priv, &buf, buf_end);
 1059                         break;
 1060                 case state_vheader5:
 1061                         state = via_parse_vheader5(dev_priv, &buf, buf_end);
 1062                         break;
 1063                 case state_vheader6:
 1064                         state = via_parse_vheader6(dev_priv, &buf, buf_end);
 1065                         break;
 1066                 case state_command:
 1067                         if (HALCYON_HEADER2 == (cmd = *buf))
 1068                                 state = state_header2;
 1069                         else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
 1070                                 state = state_header1;
 1071                         else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
 1072                                 state = state_vheader5;
 1073                         else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
 1074                                 state = state_vheader6;
 1075                         else {
 1076                                 DRM_ERROR
 1077                                     ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
 1078                                      cmd);
 1079                                 state = state_error;
 1080                         }
 1081                         break;
 1082                 case state_error:
 1083                 default:
 1084                         return -EINVAL;
 1085                 }
 1086         }
 1087         if (state == state_error) {
 1088                 return -EINVAL;
 1089         }
 1090         return 0;
 1091 }
 1092 
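       /*
        * Build the 256-entry hazard tables from the init tables above. Any
        * opcode not present in an init table defaults to forbidden_command,
        * so unknown commands are rejected by investigate_hazard().
        */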
 1093 static void
 1094 setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
 1095 {
 1096         int i;
 1097 
 1098         for (i = 0; i < 256; ++i) {
 1099                 table[i] = forbidden_command;
 1100         }
 1101 
 1102         for (i = 0; i < size; ++i) {
 1103                 table[init_table[i].code] = init_table[i].hz;
 1104         }
 1105 }
 1106 
 1107 void via_init_command_verifier(void)
 1108 {
 1109         setup_hazard_table(init_table1, table1,
 1110                            sizeof(init_table1) / sizeof(hz_init_t));
 1111         setup_hazard_table(init_table2, table2,
 1112                            sizeof(init_table2) / sizeof(hz_init_t));
 1113         setup_hazard_table(init_table3, table3,
 1114                            sizeof(init_table3) / sizeof(hz_init_t));
 1115 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.