The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/via_dma.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* via_dma.c -- DMA support for the VIA Unichrome/Pro
    2  *
    3  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
    4  * All Rights Reserved.
    5  *
    6  * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
    7  * All Rights Reserved.
    8  *
    9  * Copyright 2004 The Unichrome project.
   10  * All Rights Reserved.
   11  *
   12  * Permission is hereby granted, free of charge, to any person obtaining a
   13  * copy of this software and associated documentation files (the "Software"),
   14  * to deal in the Software without restriction, including without limitation
   15  * the rights to use, copy, modify, merge, publish, distribute, sub license,
   16  * and/or sell copies of the Software, and to permit persons to whom the
   17  * Software is furnished to do so, subject to the following conditions:
   18  *
   19  * The above copyright notice and this permission notice (including the
   20  * next paragraph) shall be included in all copies or substantial portions
   21  * of the Software.
   22  *
   23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   25  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
   26  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
   27  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
   28  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
   29  * USE OR OTHER DEALINGS IN THE SOFTWARE.
   30  *
   31  * Authors:
   32  *    Tungsten Graphics,
   33  *    Erdi Chen,
   34  *    Thomas Hellstrom.
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD: releng/9.0/sys/dev/drm/via_dma.c 207066 2010-04-22 18:21:25Z rnoland $");
   39 
   40 #include "dev/drm/drmP.h"
   41 #include "dev/drm/drm.h"
   42 #include "dev/drm/via_drm.h"
   43 #include "dev/drm/via_drv.h"
   44 #include "dev/drm/via_3d_reg.h"
   45 
   46 #define CMDBUF_ALIGNMENT_SIZE   (0x100)
   47 #define CMDBUF_ALIGNMENT_MASK   (0x0ff)
   48 
   49 /* defines for VIA 3D registers */
   50 #define VIA_REG_STATUS          0x400
   51 #define VIA_REG_TRANSET         0x43C
   52 #define VIA_REG_TRANSPACE       0x440
   53 
   54 /* VIA_REG_STATUS(0x400): Engine Status */
   55 #define VIA_CMD_RGTR_BUSY       0x00000080      /* Command Regulator is busy */
   56 #define VIA_2D_ENG_BUSY         0x00000001      /* 2D Engine is busy */
   57 #define VIA_3D_ENG_BUSY         0x00000002      /* 3D Engine is busy */
   58 #define VIA_VR_QUEUE_BUSY       0x00020000      /* Virtual Queue is busy */
   59 
   60 #define SetReg2DAGP(nReg, nData) {                              \
   61         *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
   62         *((uint32_t *)(vb) + 1) = (nData);                      \
   63         vb = ((uint32_t *)vb) + 2;                              \
   64         dev_priv->dma_low +=8;                                  \
   65 }
   66 
   67 #define via_flush_write_combine() DRM_MEMORYBARRIER()
   68 
   69 #define VIA_OUT_RING_QW(w1,w2)                  \
   70         *vb++ = (w1);                           \
   71         *vb++ = (w2);                           \
   72         dev_priv->dma_low += 8;
   73 
   74 static void via_cmdbuf_start(drm_via_private_t * dev_priv);
   75 static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
   76 static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
   77 static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
   78 static int via_wait_idle(drm_via_private_t * dev_priv);
   79 static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
   80 
   81 /*
   82  * Free space in command buffer.
   83  */
   84 
   85 static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
   86 {
   87         uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
   88         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
   89 
   90         return ((hw_addr <= dev_priv->dma_low) ?
   91                 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
   92                 (hw_addr - dev_priv->dma_low));
   93 }
   94 
   95 /*
   96  * How much does the command regulator lag behind?
   97  */
   98 
   99 static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
  100 {
  101         uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
  102         uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
  103 
  104         return ((hw_addr <= dev_priv->dma_low) ?
  105                 (dev_priv->dma_low - hw_addr) :
  106                 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
  107 }
  108 
/*
 * Busy-wait until 'size' bytes, plus a 512 KiB safety margin, can be
 * written at the current tail without overtaking the hardware read
 * pointer.  Returns 0 on success, -1 if the wait timed out.
 */

static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
        uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        uint32_t cur_addr, hw_addr, next_addr;
        volatile uint32_t *hw_addr_ptr;
        uint32_t count;
        hw_addr_ptr = dev_priv->hw_addr_ptr;
        cur_addr = dev_priv->dma_low;
        /* Claim [cur_addr, next_addr]: requested size plus 512 KiB slack
         * so we never pack right up against the reader. */
        next_addr = cur_addr + size + 512 * 1024;
        count = 1000000;
        do {
                /* Re-read the hardware read pointer each iteration; the
                 * pointer is exported by the regulator through MMIO. */
                hw_addr = *hw_addr_ptr - agp_base;
                if (count-- == 0) {
                        DRM_ERROR
                            ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
                             hw_addr, cur_addr, next_addr);
                        return -1;
                }
                /* Reader sits inside the window we want to claim: back off. */
                if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
                        DRM_UDELAY(1000);
        } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
        return 0;
}
  137 
  138 /*
  139  * Checks whether buffer head has reach the end. Rewind the ring buffer
  140  * when necessary.
  141  *
  142  * Returns virtual pointer to ring buffer.
  143  */
  144 
  145 static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
  146                                       unsigned int size)
  147 {
  148         if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
  149             dev_priv->dma_high) {
  150                 via_cmdbuf_rewind(dev_priv);
  151         }
  152         if (via_cmdbuf_wait(dev_priv, size) != 0) {
  153                 return NULL;
  154         }
  155 
  156         return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
  157 }
  158 
  159 int via_dma_cleanup(struct drm_device * dev)
  160 {
  161         drm_via_blitq_t *blitq;
  162         int i;
  163 
  164         if (dev->dev_private) {
  165                 drm_via_private_t *dev_priv =
  166                     (drm_via_private_t *) dev->dev_private;
  167 
  168                 if (dev_priv->ring.virtual_start) {
  169                         via_cmdbuf_reset(dev_priv);
  170 
  171                         drm_core_ioremapfree(&dev_priv->ring.map, dev);
  172                         dev_priv->ring.virtual_start = NULL;
  173                 }
  174 
  175                 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
  176                         blitq = dev_priv->blit_queues + i;
  177                         mtx_destroy(&blitq->blit_lock);
  178                 }
  179         }
  180 
  181         return 0;
  182 }
  183 
/*
 * Set up the AGP ring buffer: validate preconditions, map the ring into
 * kernel space write-combined, initialize the DMA bookkeeping in
 * dev_priv, and start the command regulator.
 * Returns 0 on success or a negative errno.
 */
static int via_initialize(struct drm_device * dev,
                          drm_via_private_t * dev_priv,
                          drm_via_dma_init_t * init)
{
        if (!dev_priv || !dev_priv->mmio) {
                DRM_ERROR("via_dma_init called before via_map_init\n");
                return -EFAULT;
        }

        if (dev_priv->ring.virtual_start != NULL) {
                DRM_ERROR("called again without calling cleanup\n");
                return -EFAULT;
        }

        if (!dev->agp || !dev->agp->base) {
                DRM_ERROR("called with no agp memory available\n");
                return -EFAULT;
        }

        if (dev_priv->chipset == VIA_DX9_0) {
                DRM_ERROR("AGP DMA is not supported on this chip\n");
                return -EINVAL;
        }

        /* Describe the ring buffer region inside the AGP aperture. */
        dev_priv->ring.map.offset = dev->agp->base + init->offset;
        dev_priv->ring.map.size = init->size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        /* Write-combined mapping: the ring is write-mostly from the CPU. */
        drm_core_ioremap_wc(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.virtual == NULL) {
                via_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;

        /* Ring bookkeeping: dma_low is the CPU tail, dma_high the ring
         * size, dma_wrap the current wrap point. */
        dev_priv->dma_ptr = dev_priv->ring.virtual_start;
        dev_priv->dma_low = 0;
        dev_priv->dma_high = init->size;
        dev_priv->dma_wrap = init->size;
        dev_priv->dma_offset = init->offset;
        dev_priv->last_pause_ptr = NULL;
        /* MMIO location through which the hardware exports its read
         * pointer; offset supplied by userspace at init time. */
        dev_priv->hw_addr_ptr =
                (volatile uint32_t *)((char *)dev_priv->mmio->virtual +
                init->reg_pause_addr);

        via_cmdbuf_start(dev_priv);

        return 0;
}
  239 
  240 static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
  241 {
  242         drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  243         drm_via_dma_init_t *init = data;
  244         int retcode = 0;
  245 
  246         switch (init->func) {
  247         case VIA_INIT_DMA:
  248                 if (!DRM_SUSER(DRM_CURPROC))
  249                         retcode = -EPERM;
  250                 else
  251                         retcode = via_initialize(dev, dev_priv, init);
  252                 break;
  253         case VIA_CLEANUP_DMA:
  254                 if (!DRM_SUSER(DRM_CURPROC))
  255                         retcode = -EPERM;
  256                 else
  257                         retcode = via_dma_cleanup(dev);
  258                 break;
  259         case VIA_DMA_INITIALIZED:
  260                 retcode = (dev_priv->ring.virtual_start != NULL) ?
  261                         0 : -EFAULT;
  262                 break;
  263         default:
  264                 retcode = -EINVAL;
  265                 break;
  266         }
  267 
  268         return retcode;
  269 }
  270 
/*
 * Dispatch a user command buffer to the AGP ring: stage it in a
 * cacheable system-memory buffer, verify the command stream, copy it
 * into the ring, then pad small submissions and emit a pause.
 * Returns 0 or a negative errno.
 */
static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
{
        drm_via_private_t *dev_priv;
        uint32_t *vb;
        int ret;

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("called without initializing AGP ring buffer.\n");
                return -EFAULT;
        }

        if (cmd->size > VIA_PCI_BUF_SIZE) {
                return -ENOMEM;
        }

        if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
                return -EFAULT;

        /*
         * Running this function on AGP memory is dead slow. Therefore
         * we run it on a temporary cacheable system memory buffer and
         * copy it to AGP memory when ready.
         */

        if ((ret =
             via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
                                       cmd->size, dev, 1))) {
                return ret;
        }

        /* Reserve ring space; buffers under 0x100 bytes reserve 0x102 so
         * the pad-to-0x100 below is already accounted for. */
        vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
        if (vb == NULL) {
                return -EAGAIN;
        }

        memcpy(vb, dev_priv->pci_buf, cmd->size);

        dev_priv->dma_low += cmd->size;

        /*
         * Small submissions somehow stalls the CPU. (AGP cache effects?)
         * pad to greater size.
         */

        if (cmd->size < 0x100)
                via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
        via_cmdbuf_pause(dev_priv);

        return 0;
}
  323 
  324 int via_driver_dma_quiescent(struct drm_device * dev)
  325 {
  326         drm_via_private_t *dev_priv = dev->dev_private;
  327 
  328         if (!via_wait_idle(dev_priv)) {
  329                 return -EBUSY;
  330         }
  331         return 0;
  332 }
  333 
/*
 * Ioctl: flush outstanding DMA by waiting for the engines to idle.
 * The caller must hold the hardware lock.
 */
static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return via_driver_dma_quiescent(dev);
}
  341 
  342 static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
  343 {
  344         drm_via_cmdbuffer_t *cmdbuf = data;
  345         int ret;
  346 
  347         LOCK_TEST_WITH_RETURN(dev, file_priv);
  348 
  349         DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
  350 
  351         ret = via_dispatch_cmdbuffer(dev, cmdbuf);
  352         if (ret) {
  353                 return ret;
  354         }
  355 
  356         return 0;
  357 }
  358 
  359 static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
  360                                       drm_via_cmdbuffer_t * cmd)
  361 {
  362         drm_via_private_t *dev_priv = dev->dev_private;
  363         int ret;
  364 
  365         if (cmd->size > VIA_PCI_BUF_SIZE) {
  366                 return -ENOMEM;
  367         }
  368         if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
  369                 return -EFAULT;
  370 
  371         if ((ret =
  372              via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
  373                                        cmd->size, dev, 0))) {
  374                 return ret;
  375         }
  376 
  377         ret =
  378             via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
  379                                      cmd->size);
  380         return ret;
  381 }
  382 
  383 static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
  384 {
  385         drm_via_cmdbuffer_t *cmdbuf = data;
  386         int ret;
  387 
  388         LOCK_TEST_WITH_RETURN(dev, file_priv);
  389 
  390         DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
  391 
  392         ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
  393         if (ret) {
  394                 return ret;
  395         }
  396 
  397         return 0;
  398 }
  399 
  400 static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
  401                                          uint32_t * vb, int qw_count)
  402 {
  403         for (; qw_count > 0; --qw_count) {
  404                 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
  405         }
  406         return vb;
  407 }
  408 
  409 /*
  410  * This function is used internally by ring buffer management code.
  411  *
  412  * Returns virtual pointer to ring buffer.
  413  */
  414 static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
  415 {
  416         return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
  417 }
  418 
/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 *
 * Returns non-zero if the regulator was found paused (0x41c bit 31).
 */
static int via_hook_segment(drm_via_private_t * dev_priv,
                            uint32_t pause_addr_hi, uint32_t pause_addr_lo,
                            int no_pci_fire)
{
        int paused, count;
        volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
        uint32_t reader,ptr;
        uint32_t diff;

        paused = 0;
        /* Flush write-combine buffers, then read back the last ring word
         * to make sure preceding writes have reached memory before we
         * overwrite the pause command. */
        via_flush_write_combine();
        (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);

        /* Replace the old pause command with the new pause address,
         * un-pausing the regulator, and flush/read-back again. */
        *paused_at = pause_addr_lo;
        via_flush_write_combine();
        (void) *paused_at;

        reader = *(dev_priv->hw_addr_ptr);
        /* Bus address of the word just past the old pause location. */
        ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
                dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

        dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

        /*
         * There is a possibility that the command reader misses the new
         * pause address and pauses on the old one.  In that case we need
         * to program the new start address using PCI.
         */

        /* dma_diff compensates for the hw-dependent offset between the
         * programmed pause address and the exported read pointer; see
         * via_cmdbuf_start where it is measured. */
        diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
        count = 10000000;
        /* While the reader sits exactly at the old pause spot, wait for
         * it either to move on or to assert the pause status bit. */
        while(diff == 0 && count--) {
                paused = (VIA_READ(0x41c) & 0x80000000);
                if (paused)
                        break;
                reader = *(dev_priv->hw_addr_ptr);
                diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
        }

        paused = VIA_READ(0x41c) & 0x80000000;

        if (paused && !no_pci_fire) {
                reader = *(dev_priv->hw_addr_ptr);
                diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
                diff &= (dev_priv->dma_high - 1);
                if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
                        /* Paused somewhere other than the old pause spot:
                         * unexpected; log and leave it alone. */
                        DRM_ERROR("Paused at incorrect address. "
                                  "0x%08x, 0x%08x 0x%08x\n",
                                  ptr, reader, dev_priv->dma_diff);
                } else if (diff == 0) {
                        /*
                         * There is a concern that these writes may stall the PCI bus
                         * if the GPU is not idle. However, idling the GPU first
                         * doesn't make a difference.
                         */

                        /* Paused on the stale address: re-issue the new
                         * pause address over PCI to restart the regulator. */
                        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
                        VIA_READ(VIA_REG_TRANSPACE);
                }
        }
        return paused;
}
  489 
/*
 * Poll the engine status register until the hardware goes idle, bounded
 * by a fixed iteration budget.  Returns the remaining count: non-zero
 * means idle was reached, 0 means the budget was exhausted.
 */
static int via_wait_idle(drm_via_private_t * dev_priv)
{
        int count = 10000000;

        /* First spin while the VR queue busy bit is clear.
         * NOTE(review): polarity inferred from the loop only -- the code
         * waits for VIA_VR_QUEUE_BUSY to become set before draining;
         * confirm against the Unichrome status-register documentation. */
        while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
                ;

        /* Then spin until the regulator and the 2D/3D engines drain. */
        while (count && (VIA_READ(VIA_REG_STATUS) &
                           (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
                            VIA_3D_ENG_BUSY)))
                --count;
        return count;
}
  503 
/*
 * Pad the ring tail up to the next CMDBUF_ALIGNMENT_SIZE boundary and
 * emit an address command (pause/jump/stop, selected by cmd_type) as
 * the final quadword before that boundary.  The hi/lo halves of the
 * emitted address command are returned through *cmd_addr_hi and
 * *cmd_addr_lo.  If 'addr' is 0 the command targets the boundary
 * itself.  Returns the ring cursor just past the emitted command.
 */
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
                               uint32_t addr, uint32_t * cmd_addr_hi,
                               uint32_t * cmd_addr_lo, int skip_wait)
{
        uint32_t agp_base;
        uint32_t cmd_addr, addr_lo, addr_hi;
        uint32_t *vb;
        uint32_t qw_pad_count;

        /* Make room for up to two alignment blocks unless the caller
         * already guaranteed space (e.g. during ring start-up). */
        if (!skip_wait)
                via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

        vb = via_get_dma(dev_priv);
        /* Header2 selects TRANSET/TRANSPACE for the following writes. */
        VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
                        (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        /* Quadwords needed to reach the next alignment boundary. */
        qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
            ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

        /* Default target: bus address of the alignment boundary itself. */
        cmd_addr = (addr) ? addr :
            agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
        addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
                   (cmd_addr & HC_HAGPBpL_MASK));
        addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

        /* Dummy-pad up to the boundary, leaving the last quadword for
         * the address command emitted here. */
        vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
        VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
        return vb;
}
  533 
/*
 * Program the command regulator with the ring's start/end addresses and
 * an initial pause command, kick it off, and measure dma_diff: the
 * offset between the pause address we program and the read pointer the
 * hardware actually reports when paused (varies between hardware
 * implementations).
 */
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t start_addr, start_addr_lo;
        uint32_t end_addr, end_addr_lo;
        uint32_t command;
        uint32_t agp_base;
        uint32_t ptr;
        uint32_t reader;
        int count;

        dev_priv->dma_low = 0;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        start_addr = agp_base;
        end_addr = agp_base + dev_priv->dma_high;

        /* Split the 32-bit start/end bus addresses into the low-24-bit
         * register writes plus a combined high-bits command word. */
        start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
        end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
        command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
                   ((end_addr & 0xff000000) >> 16));

        /* Place an initial pause at the first alignment boundary and
         * remember where its low word lives so via_hook_segment can
         * rewrite it later. */
        dev_priv->last_pause_ptr =
            via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                          &pause_addr_hi, &pause_addr_lo, 1) - 1;

        via_flush_write_combine();
        (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

        /* Program the regulator over PCI: ring bounds, pause address,
         * then the go command (HC_HAGPCMNT_MASK). */
        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
        VIA_WRITE(VIA_REG_TRANSPACE, command);
        VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
        VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
        DRM_WRITEMEMORYBARRIER();
        VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
        VIA_READ(VIA_REG_TRANSPACE);

        dev_priv->dma_diff = 0;

        /* Wait (bounded) for the regulator to reach the pause. */
        count = 10000000;
        while (!(VIA_READ(0x41c) & 0x80000000) && count--);

        reader = *(dev_priv->hw_addr_ptr);
        ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
            dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

        /*
         * This is the difference between where we tell the
         * command reader to pause and where it actually pauses.
         * This differs between hw implementation so we need to
         * detect it.
         */

        dev_priv->dma_diff = ptr - reader;
}
  592 
/*
 * Emit 'qwords' quadwords of dummy padding, preceded by a NotTex
 * header.  Used to pad small submissions out to a larger size; see
 * via_dispatch_cmdbuffer for the motivation (AGP cache effects).
 */
static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
{
        uint32_t *vb;

        via_cmdbuf_wait(dev_priv, qwords + 2);
        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
        via_align_buffer(dev_priv, vb, qwords);
}
  602 
/*
 * Queue a degenerate (zero-sized) 2D bitblt into the ring.  Used after
 * a ring wrap to give the engine harmless work before the pause traps.
 * NOTE(review): the register meanings (0x0C/0x10 position/dimension,
 * 0x0 command word with ROP 0xAA) are inferred from context -- confirm
 * against the Unichrome 2D register documentation.
 */
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
        uint32_t *vb = via_get_dma(dev_priv);
        SetReg2DAGP(0x0C, (0 | (0 << 16)));
        SetReg2DAGP(0x10, 0 | (0 << 16));
        SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
  610 
/*
 * Wrap the ring buffer: emit a jump command back to the buffer start,
 * pad with dummy blits, and install pause traps to catch a regulator
 * that reruns the old buffer.  Hooks the jump and the final pause into
 * the regulator's stream via via_hook_segment.
 */
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
        uint32_t agp_base;
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t jump_addr_lo, jump_addr_hi;
        volatile uint32_t *last_pause_ptr;
        uint32_t dma_low_save1, dma_low_save2;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        /* Emit the jump command at the current (old) tail. */
        via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
                      &jump_addr_lo, 0);

        /* Record where this lap of the ring ends, for lag accounting. */
        dev_priv->dma_wrap = dev_priv->dma_low;

        /*
         * Wrap command buffer to the beginning.
         */

        dev_priv->dma_low = 0;
        if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
                DRM_ERROR("via_cmdbuf_jump failed\n");
        }

        via_dummy_bitblt(dev_priv);
        via_dummy_bitblt(dev_priv);

        /* First pause pair at the new buffer start. */
        last_pause_ptr =
            via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                          &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);

        *last_pause_ptr = pause_addr_lo;
        dma_low_save1 = dev_priv->dma_low;

        /*
         * Now, set a trap that will pause the regulator if it tries to rerun the old
         * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
         * and reissues the jump command over PCI, while the regulator has already taken the jump
         * and actually paused at the current buffer end).
         * There appears to be no other way to detect this condition, since the hw_addr_pointer
         * does not seem to get updated immediately when a jump occurs.
         */

        last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                              &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);
        *last_pause_ptr = pause_addr_lo;

        /* Hook the jump at the old tail, then restore the new tail and
         * hook the pause there. */
        dma_low_save2 = dev_priv->dma_low;
        dev_priv->dma_low = dma_low_save1;
        via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
        dev_priv->dma_low = dma_low_save2;
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
  668 
  669 
/* Rewind the ring buffer to its start by issuing a jump command. */
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
        via_cmdbuf_jump(dev_priv);
}
  674 
/*
 * Emit an address command of the given type (pause/stop) at the ring
 * tail and hook it into the regulator's stream.
 */
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
        uint32_t pause_addr_lo, pause_addr_hi;

        via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
  682 
/* Pause the command regulator at the current ring tail. */
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
  687 
/* Stop the command regulator and wait for the engines to idle. */
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
        via_wait_idle(dev_priv);
}
  693 
  694 /*
  695  * User interface to the space and lag functions.
  696  */
  697 
/*
 * Ioctl: report free ring space (VIA_CMDBUF_SPACE) or regulator lag
 * (VIA_CMDBUF_LAG), optionally busy-waiting (d_siz->wait) until the
 * requested threshold in d_siz->size is met.  The measured value is
 * returned in d_siz->size.  Returns 0, -EAGAIN on timeout, or -EFAULT.
 */
static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_via_cmdbuf_size_t *d_siz = data;
        int ret = 0;
        uint32_t tmp_size, count;
        drm_via_private_t *dev_priv;

        DRM_DEBUG("\n");
        LOCK_TEST_WITH_RETURN(dev, file_priv);

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("called without initializing AGP ring buffer.\n");
                return -EFAULT;
        }

        count = 1000000;
        tmp_size = d_siz->size;
        switch (d_siz->func) {
        case VIA_CMDBUF_SPACE:
                /* Poll until enough space is free; a single pass when
                 * the caller did not ask to wait. */
                while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
                       && --count) {
                        if (!d_siz->wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
                        ret = -EAGAIN;
                }
                break;
        case VIA_CMDBUF_LAG:
                /* Poll until the regulator's lag drops below the bound. */
                while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
                       && --count) {
                        if (!d_siz->wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
                        ret = -EAGAIN;
                }
                break;
        default:
                ret = -EFAULT;
        }
        /* Report the last measured value even on timeout. */
        d_siz->size = tmp_size;

        return ret;
}
  749 
/* Ioctl dispatch table for the via DRM driver.  DRM_AUTH entries require
 * an authenticated client; DRM_MASTER additionally requires the DRM
 * master (the init-type ioctls). */
struct drm_ioctl_desc via_ioctls[] = {
        DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

/* Number of entries in via_ioctls, exported to the DRM core. */
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);

Cache object: 78aed5c8010317cc06a375a109222de7


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.