The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/drm/savage_bci.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* savage_bci.c -- BCI support for Savage
    2  *
    3  * Copyright 2004  Felix Kuehling
    4  * All Rights Reserved.
    5  *
    6  * Permission is hereby granted, free of charge, to any person obtaining a
    7  * copy of this software and associated documentation files (the "Software"),
    8  * to deal in the Software without restriction, including without limitation
    9  * the rights to use, copy, modify, merge, publish, distribute, sub license,
   10  * and/or sell copies of the Software, and to permit persons to whom the
   11  * Software is furnished to do so, subject to the following conditions:
   12  *
   13  * The above copyright notice and this permission notice (including the
   14  * next paragraph) shall be included in all copies or substantial portions
   15  * of the Software.
   16  *
   17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   20  * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
   21  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
   22  * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
   23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
   24  */
   25 
   26 #include <sys/cdefs.h>
   27 __FBSDID("$FreeBSD: releng/6.4/sys/dev/drm/savage_bci.c 158686 2006-05-17 07:40:12Z anholt $");
   28 #include "dev/drm/drmP.h"
   29 #include "dev/drm/savage_drm.h"
   30 #include "dev/drm/savage_drv.h"
   31 
   32 /* Need a long timeout for shadow status updates can take a while
   33  * and so can waiting for events when the queue is full. */
   34 #define SAVAGE_DEFAULT_USEC_TIMEOUT     1000000 /* 1s */
   35 #define SAVAGE_EVENT_USEC_TIMEOUT       5000000 /* 5s */
   36 #define SAVAGE_FREELIST_DEBUG           0
   37 
   38 static int
   39 savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
   40 {
   41         uint32_t mask = dev_priv->status_used_mask;
   42         uint32_t threshold = dev_priv->bci_threshold_hi;
   43         uint32_t status;
   44         int i;
   45 
   46 #if SAVAGE_BCI_DEBUG
   47         if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
   48                 DRM_ERROR("Trying to emit %d words "
   49                           "(more than guaranteed space in COB)\n", n);
   50 #endif
   51 
   52         for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
   53                 DRM_MEMORYBARRIER();
   54                 status = dev_priv->status_ptr[0];
   55                 if ((status & mask) < threshold)
   56                         return 0;
   57                 DRM_UDELAY(1);
   58         }
   59 
   60 #if SAVAGE_BCI_DEBUG
   61         DRM_ERROR("failed!\n");
   62         DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
   63 #endif
   64         return DRM_ERR(EBUSY);
   65 }
   66 
   67 static int
   68 savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
   69 {
   70         uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
   71         uint32_t status;
   72         int i;
   73 
   74         for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
   75                 status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
   76                 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
   77                         return 0;
   78                 DRM_UDELAY(1);
   79         }
   80 
   81 #if SAVAGE_BCI_DEBUG
   82         DRM_ERROR("failed!\n");
   83         DRM_INFO("   status=0x%08x\n", status);
   84 #endif
   85         return DRM_ERR(EBUSY);
   86 }
   87 
   88 static int
   89 savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
   90 {
   91         uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
   92         uint32_t status;
   93         int i;
   94 
   95         for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
   96                 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
   97                 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
   98                         return 0;
   99                 DRM_UDELAY(1);
  100         }
  101 
  102 #if SAVAGE_BCI_DEBUG
  103         DRM_ERROR("failed!\n");
  104         DRM_INFO("   status=0x%08x\n", status);
  105 #endif
  106         return DRM_ERR(EBUSY);
  107 }
  108 
  109 /*
  110  * Waiting for events.
  111  *
   112  * The BIOS resets the event tag to 0 on mode changes. Therefore we
  113  * never emit 0 to the event tag. If we find a 0 event tag we know the
  114  * BIOS stomped on it and return success assuming that the BIOS waited
  115  * for engine idle.
  116  *
  117  * Note: if the Xserver uses the event tag it has to follow the same
  118  * rule. Otherwise there may be glitches every 2^16 events.
  119  */
  120 static int
  121 savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
  122 {
  123         uint32_t status;
  124         int i;
  125 
  126         for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
  127                 DRM_MEMORYBARRIER();
  128                 status = dev_priv->status_ptr[1];
  129                 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
  130                     (status & 0xffff) == 0)
  131                         return 0;
  132                 DRM_UDELAY(1);
  133         }
  134 
  135 #if SAVAGE_BCI_DEBUG
  136         DRM_ERROR("failed!\n");
  137         DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
  138 #endif
  139 
  140         return DRM_ERR(EBUSY);
  141 }
  142 
  143 static int
  144 savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
  145 {
  146         uint32_t status;
  147         int i;
  148 
  149         for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
  150                 status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
  151                 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
  152                     (status & 0xffff) == 0)
  153                         return 0;
  154                 DRM_UDELAY(1);
  155         }
  156 
  157 #if SAVAGE_BCI_DEBUG
  158         DRM_ERROR("failed!\n");
  159         DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
  160 #endif
  161 
  162         return DRM_ERR(EBUSY);
  163 }
  164 
/*
 * Allocate the next 16-bit event tag and emit a BCI command that makes
 * the hardware update the event tag register when it reaches this
 * point in the command stream.  Optionally prepends a 2D/3D idle wait
 * (flags SAVAGE_WAIT_2D / SAVAGE_WAIT_3D).  Returns the emitted tag.
 * Tag 0 is never emitted (reserved for BIOS clobbering, see the
 * comment above savage_bci_wait_event_shadow).
 */
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
                               unsigned int flags)
{
        uint16_t count;
        BCI_LOCALS;

        if (dev_priv->status_ptr) {
                /* coordinate with Xserver */
                /* The Xserver shares the counter via shadow-status
                 * slot 1023; a smaller value than ours means it has
                 * wrapped since we last looked. */
                count = dev_priv->status_ptr[1023];
                if (count < dev_priv->event_counter)
                        dev_priv->event_wrap++;
        } else {
                count = dev_priv->event_counter;
        }
        count = (count + 1) & 0xffff;
        if (count == 0) {
                count++; /* See the comment above savage_wait_event_*. */
                dev_priv->event_wrap++;
        }
        dev_priv->event_counter = count;
        /* Publish the new counter back to the shared slot. */
        if (dev_priv->status_ptr)
                dev_priv->status_ptr[1023] = (uint32_t)count;

        if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
                /* Emit a wait command so the tag update happens only
                 * after the requested engines go idle. */
                unsigned int wait_cmd = BCI_CMD_WAIT;
                if ((flags & SAVAGE_WAIT_2D))
                        wait_cmd |= BCI_CMD_WAIT_2D;
                if ((flags & SAVAGE_WAIT_3D))
                        wait_cmd |= BCI_CMD_WAIT_3D;
                BEGIN_BCI(2);
                BCI_WRITE(wait_cmd);
        } else {
                BEGIN_BCI(1);
        }
        BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);

        return count;
}
  203 
  204 /*
  205  * Freelist management
  206  */
  207 static int savage_freelist_init(drm_device_t *dev)
  208 {
  209         drm_savage_private_t *dev_priv = dev->dev_private;
  210         drm_device_dma_t *dma = dev->dma;
  211         drm_buf_t *buf;
  212         drm_savage_buf_priv_t *entry;
  213         int i;
  214         DRM_DEBUG("count=%d\n", dma->buf_count);
  215 
  216         dev_priv->head.next = &dev_priv->tail;
  217         dev_priv->head.prev = NULL;
  218         dev_priv->head.buf = NULL;
  219 
  220         dev_priv->tail.next = NULL;
  221         dev_priv->tail.prev = &dev_priv->head;
  222         dev_priv->tail.buf = NULL;
  223 
  224         for (i = 0; i < dma->buf_count; i++) {
  225                 buf = dma->buflist[i];
  226                 entry = buf->dev_private;
  227 
  228                 SET_AGE(&entry->age, 0, 0);
  229                 entry->buf = buf;
  230 
  231                 entry->next = dev_priv->head.next;
  232                 entry->prev = &dev_priv->head;
  233                 dev_priv->head.next->prev = entry;
  234                 dev_priv->head.next = entry;
  235         }
  236 
  237         return 0;
  238 }
  239 
  240 static drm_buf_t *savage_freelist_get(drm_device_t *dev)
  241 {
  242         drm_savage_private_t *dev_priv = dev->dev_private;
  243         drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
  244         uint16_t event;
  245         unsigned int wrap;
  246         DRM_DEBUG("\n");
  247 
  248         UPDATE_EVENT_COUNTER();
  249         if (dev_priv->status_ptr)
  250                 event = dev_priv->status_ptr[1] & 0xffff;
  251         else
  252                 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
  253         wrap = dev_priv->event_wrap;
  254         if (event > dev_priv->event_counter)
  255                 wrap--; /* hardware hasn't passed the last wrap yet */
  256 
  257         DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
  258         DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
  259 
  260         if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
  261                 drm_savage_buf_priv_t *next = tail->next;
  262                 drm_savage_buf_priv_t *prev = tail->prev;
  263                 prev->next = next;
  264                 next->prev = prev;
  265                 tail->next = tail->prev = NULL;
  266                 return tail->buf;
  267         }
  268 
  269         DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
  270         return NULL;
  271 }
  272 
  273 void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
  274 {
  275         drm_savage_private_t *dev_priv = dev->dev_private;
  276         drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
  277 
  278         DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
  279 
  280         if (entry->next != NULL || entry->prev != NULL) {
  281                 DRM_ERROR("entry already on freelist.\n");
  282                 return;
  283         }
  284 
  285         prev = &dev_priv->head;
  286         next = prev->next;
  287         prev->next = entry;
  288         next->prev = entry;
  289         entry->prev = prev;
  290         entry->next = next;
  291 }
  292 
  293 /*
  294  * Command DMA
  295  */
  296 static int savage_dma_init(drm_savage_private_t *dev_priv)
  297 {
  298         unsigned int i;
  299 
  300         dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
  301                 (SAVAGE_DMA_PAGE_SIZE*4);
  302         dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
  303                                         dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
  304         if (dev_priv->dma_pages == NULL)
  305                 return DRM_ERR(ENOMEM);
  306 
  307         for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
  308                 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
  309                 dev_priv->dma_pages[i].used = 0;
  310                 dev_priv->dma_pages[i].flushed = 0;
  311         }
  312         SET_AGE(&dev_priv->last_dma_age, 0, 0);
  313 
  314         dev_priv->first_dma_page = 0;
  315         dev_priv->current_dma_page = 0;
  316 
  317         return 0;
  318 }
  319 
  320 void savage_dma_reset(drm_savage_private_t *dev_priv)
  321 {
  322         uint16_t event;
  323         unsigned int wrap, i;
  324         event = savage_bci_emit_event(dev_priv, 0);
  325         wrap = dev_priv->event_wrap;
  326         for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
  327                 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
  328                 dev_priv->dma_pages[i].used = 0;
  329                 dev_priv->dma_pages[i].flushed = 0;
  330         }
  331         SET_AGE(&dev_priv->last_dma_age, event, wrap);
  332         dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
  333 }
  334 
  335 void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
  336 {
  337         uint16_t event;
  338         unsigned int wrap;
  339 
  340         /* Faked DMA buffer pages don't age. */
  341         if (dev_priv->cmd_dma == &dev_priv->fake_dma)
  342                 return;
  343 
  344         UPDATE_EVENT_COUNTER();
  345         if (dev_priv->status_ptr)
  346                 event = dev_priv->status_ptr[1] & 0xffff;
  347         else
  348                 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
  349         wrap = dev_priv->event_wrap;
  350         if (event > dev_priv->event_counter)
  351                 wrap--; /* hardware hasn't passed the last wrap yet */
  352 
  353         if (dev_priv->dma_pages[page].age.wrap > wrap ||
  354             (dev_priv->dma_pages[page].age.wrap == wrap &&
  355              dev_priv->dma_pages[page].age.event > event)) {
  356                 if (dev_priv->wait_evnt(dev_priv,
  357                                         dev_priv->dma_pages[page].age.event)
  358                     < 0)
  359                         DRM_ERROR("wait_evnt failed!\n");
  360         }
  361 }
  362 
/*
 * Allocate n 32-bit words of command-DMA space, starting at the
 * current write position.  If the request doesn't fit in the
 * remaining pages, the pending commands are flushed first and
 * allocation restarts at page 0.  Returns a pointer into the command
 * buffer; updates current_dma_page and the per-page used counters.
 * Waits (savage_dma_wait) until the final page is reusable.
 *
 * NOTE(review): the returned span may cross page boundaries; callers
 * presumably rely on pages being physically contiguous within
 * cmd_dma — TODO confirm against the mapping setup.
 */
uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
{
        unsigned int cur = dev_priv->current_dma_page;
        /* Words still free in the current page. */
        unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
                dev_priv->dma_pages[cur].used;
        /* Additional full/partial pages needed beyond the current one. */
        unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
                SAVAGE_DMA_PAGE_SIZE;
        uint32_t *dma_ptr;
        unsigned int i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
                  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

        if (cur + nr_pages < dev_priv->nr_dma_pages) {
                /* Fits without wrapping: start at the current write
                 * position and consume the tail of the current page. */
                dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
                    cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                if (n < rest)
                        rest = n;
                dev_priv->dma_pages[cur].used += rest;
                n -= rest;
                cur++;
        } else {
                /* Doesn't fit: flush everything pending, mark the
                 * remaining pages with the last flush age, and restart
                 * the allocation at page 0. */
                dev_priv->dma_flush(dev_priv);
                nr_pages =
                    (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
                for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
                        dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
                        dev_priv->dma_pages[i].used = 0;
                        dev_priv->dma_pages[i].flushed = 0;
                }
                dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
                dev_priv->first_dma_page = cur = 0;
        }
        /* Charge the remaining n words against the following pages. */
        for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
                if (dev_priv->dma_pages[i].used) {
                        DRM_ERROR("unflushed page %u: used=%u\n",
                                  i, dev_priv->dma_pages[i].used);
                }
#endif
                if (n > SAVAGE_DMA_PAGE_SIZE)
                        dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
                else
                        dev_priv->dma_pages[i].used = n;
                n -= SAVAGE_DMA_PAGE_SIZE;
        }
        /* The loop leaves i one past the last page written. */
        dev_priv->current_dma_page = --i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
                  i, dev_priv->dma_pages[i].used, n);

        /* Make sure the hardware is done with the page we'll write to. */
        savage_dma_wait(dev_priv, dev_priv->current_dma_page);

        return dma_ptr;
}
  418 
/*
 * Kick off hardware DMA for the command words accumulated since the
 * last flush (pages first_dma_page..current_dma_page).  Pads the
 * stream to an even word count, programs SAVAGE_DMABUFADDR, emits the
 * DMA command, then ages the flushed pages with a freshly emitted
 * event so savage_dma_wait can tell when they become reusable.
 */
static void savage_dma_flush(drm_savage_private_t *dev_priv)
{
        unsigned int first = dev_priv->first_dma_page;
        unsigned int cur = dev_priv->current_dma_page;
        uint16_t event;
        unsigned int wrap, pad, align, len, i;
        unsigned long phys_addr;
        BCI_LOCALS;

        /* Nothing new since the last flush? */
        if (first == cur &&
            dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
                return;

        /* pad length to multiples of 2 entries
         * align start of next DMA block to multiples of 8 entries */
        pad = -dev_priv->dma_pages[cur].used & 1;
        align = -(dev_priv->dma_pages[cur].used + pad) & 7;

        DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
                  "pad=%u, align=%u\n",
                  first, cur, dev_priv->dma_pages[first].flushed,
                  dev_priv->dma_pages[cur].used, pad, align);

        /* pad with noops */
        if (pad) {
                uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                dev_priv->dma_pages[cur].used += pad;
                while(pad != 0) {
                        *dma_ptr++ = BCI_CMD_WAIT;
                        pad--;
                }
        }

        /* Ensure the command words are visible before starting DMA. */
        DRM_MEMORYBARRIER();

        /* do flush ... */
        /* Byte address of the first unflushed word (words are 4 bytes). */
        phys_addr = dev_priv->cmd_dma->offset +
                (first * SAVAGE_DMA_PAGE_SIZE +
                 dev_priv->dma_pages[first].flushed) * 4;
        /* Length in words from the first unflushed word to the end. */
        len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
            dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

        DRM_DEBUG("phys_addr=%lx, len=%u\n",
                  phys_addr | dev_priv->dma_type, len);

        BEGIN_BCI(3);
        BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
        BCI_WRITE(phys_addr | dev_priv->dma_type);
        BCI_DMA(len);

        /* fix alignment of the start of the next block */
        dev_priv->dma_pages[cur].used += align;

        /* age DMA pages */
        event = savage_bci_emit_event(dev_priv, 0);
        wrap = dev_priv->event_wrap;
        for (i = first; i < cur; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        /* age the current page only when it's full */
        if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
                SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
                dev_priv->dma_pages[cur].used = 0;
                dev_priv->dma_pages[cur].flushed = 0;
                /* advance to next page */
                cur++;
                if (cur == dev_priv->nr_dma_pages)
                        cur = 0;
                dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
        } else {
                dev_priv->first_dma_page = cur;
                /* NOTE(review): the aging loop above exits with
                 * i == cur, so dma_pages[i] is dma_pages[cur] here. */
                dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
        }
        SET_AGE(&dev_priv->last_dma_age, event, wrap);

        DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
                  dev_priv->dma_pages[cur].used,
                  dev_priv->dma_pages[cur].flushed);
}
  501 
  502 static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
  503 {
  504         unsigned int i, j;
  505         BCI_LOCALS;
  506 
  507         if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
  508             dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
  509                 return;
  510 
  511         DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
  512                   dev_priv->first_dma_page, dev_priv->current_dma_page,
  513                   dev_priv->dma_pages[dev_priv->current_dma_page].used);
  514 
  515         for (i = dev_priv->first_dma_page;
  516              i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
  517              ++i) {
  518                 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
  519                         i * SAVAGE_DMA_PAGE_SIZE;
  520 #if SAVAGE_DMA_DEBUG
  521                 /* Sanity check: all pages except the last one must be full. */
  522                 if (i < dev_priv->current_dma_page &&
  523                     dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
  524                         DRM_ERROR("partial DMA page %u: used=%u",
  525                                   i, dev_priv->dma_pages[i].used);
  526                 }
  527 #endif
  528                 BEGIN_BCI(dev_priv->dma_pages[i].used);
  529                 for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
  530                         BCI_WRITE(dma_ptr[j]);
  531                 }
  532                 dev_priv->dma_pages[i].used = 0;
  533         }
  534 
  535         /* reset to first page */
  536         dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
  537 }
  538 
  539 int savage_driver_load(drm_device_t *dev, unsigned long chipset)
  540 {
  541         drm_savage_private_t *dev_priv;
  542 
  543         dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
  544         if (dev_priv == NULL)
  545                 return DRM_ERR(ENOMEM);
  546 
  547         memset(dev_priv, 0, sizeof(drm_savage_private_t));
  548         dev->dev_private = (void *)dev_priv;
  549 
  550         dev_priv->chipset = (enum savage_family)chipset;
  551 
  552         return 0;
  553 }
  554 
  555 /*
  556  * Initialize mappings. On Savage4 and SavageIX the alignment
  557  * and size of the aperture is not suitable for automatic MTRR setup
  558  * in drm_addmap. Therefore we add them manually before the maps are
  559  * initialized, and tear them down on last close.
  560  */
/*
 * First-open hook: manually set up write-combining MTRRs for the
 * framebuffer/aperture (their alignment/size defeats drm_addmap's
 * automatic MTRR setup on Savage3D/4-family chips), then add the
 * MMIO, framebuffer, and aperture mappings.  Returns 0 on success or
 * a drm_addmap error code.  MTRR handles stay -1 when unused so
 * savage_driver_lastclose knows what to tear down.
 */
int savage_driver_firstopen(drm_device_t *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        unsigned long mmio_base, fb_base, fb_size, aperture_base;
        /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
         * in case we decide we need information on the BAR for BSD in the
         * future.
         */
        unsigned int fb_rsrc, aper_rsrc;
        int ret = 0;

        /* -1 marks an MTRR slot as unused. */
        dev_priv->mtrr[0].handle = -1;
        dev_priv->mtrr[1].handle = -1;
        dev_priv->mtrr[2].handle = -1;
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                /* Savage3D/MX/IX: one BAR holds FB, MMIO and aperture. */
                fb_rsrc = 0;
                fb_base = drm_get_resource_start(dev, 0);
                fb_size = SAVAGE_FB_SIZE_S3;
                mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
                aper_rsrc = 0;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (drm_get_resource_len(dev, 0) == 0x08000000) {
                        /* Don't make MMIO write-combining! We need 3
                         * MTRRs. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x01000000;
                        dev_priv->mtrr[0].handle = 
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                        dev_priv->mtrr[1].base = fb_base+0x02000000;
                        dev_priv->mtrr[1].size = 0x02000000;
                        dev_priv->mtrr[1].handle =
                            drm_mtrr_add(dev_priv->mtrr[1].base,
                                         dev_priv->mtrr[1].size, DRM_MTRR_WC);
                        dev_priv->mtrr[2].base = fb_base+0x04000000;
                        dev_priv->mtrr[2].size = 0x04000000;
                        dev_priv->mtrr[2].handle =
                            drm_mtrr_add(dev_priv->mtrr[2].base, 
                                         dev_priv->mtrr[2].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 0));
                }
        } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
                   dev_priv->chipset != S3_SAVAGE2000) {
                /* Savage4 family: MMIO in BAR 0, FB/aperture in BAR 1. */
                mmio_base = drm_get_resource_start(dev, 0);
                fb_rsrc = 1;
                fb_base = drm_get_resource_start(dev, 1);
                fb_size = SAVAGE_FB_SIZE_S4;
                aper_rsrc = 1;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (drm_get_resource_len(dev, 1) == 0x08000000) {
                        /* Can use one MTRR to cover both fb and
                         * aperture. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x08000000;
                        dev_priv->mtrr[0].handle = 
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 1));
                }
        } else {
                /* SuperSavage/Savage2000: separate BARs for everything. */
                mmio_base = drm_get_resource_start(dev, 0);
                fb_rsrc = 1;
                fb_base = drm_get_resource_start(dev, 1);
                fb_size = drm_get_resource_len(dev, 1);
                aper_rsrc = 2;
                aperture_base = drm_get_resource_start(dev, 2);
                /* Automatic MTRR setup will do the right thing. */
        }

        ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
                         _DRM_READ_ONLY, &dev_priv->mmio);
        if (ret)
                return ret;

        ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
                         _DRM_WRITE_COMBINING, &dev_priv->fb);
        if (ret)
                return ret;

        ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
                         _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
                         &dev_priv->aperture);
        if (ret)
                return ret;

        return ret;
}
  654 
  655 /*
  656  * Delete MTRRs and free device-private data.
  657  */
  658 void savage_driver_lastclose(drm_device_t *dev)
  659 {
  660         drm_savage_private_t *dev_priv = dev->dev_private;
  661         int i;
  662 
  663         for (i = 0; i < 3; ++i)
  664                 if (dev_priv->mtrr[i].handle >= 0)
  665                         drm_mtrr_del(dev_priv->mtrr[i].handle,
  666                                      dev_priv->mtrr[i].base,
  667                                      dev_priv->mtrr[i].size, DRM_MTRR_WC);
  668 }
  669 
  670 int savage_driver_unload(drm_device_t *dev)
  671 {
  672         drm_savage_private_t *dev_priv = dev->dev_private;
  673 
  674         drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
  675 
  676         return 0;
  677 }
  678 
  679 static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
  680 {
  681         drm_savage_private_t *dev_priv = dev->dev_private;
  682 
  683         if (init->fb_bpp != 16 && init->fb_bpp != 32) {
  684                 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
  685                 return DRM_ERR(EINVAL);
  686         }
  687         if (init->depth_bpp != 16 && init->depth_bpp != 32) {
  688                 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
  689                 return DRM_ERR(EINVAL);
  690         }
  691         if (init->dma_type != SAVAGE_DMA_AGP &&
  692             init->dma_type != SAVAGE_DMA_PCI) {
  693                 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
  694                 return DRM_ERR(EINVAL);
  695         }
  696 
  697         dev_priv->cob_size = init->cob_size;
  698         dev_priv->bci_threshold_lo = init->bci_threshold_lo;
  699         dev_priv->bci_threshold_hi = init->bci_threshold_hi;
  700         dev_priv->dma_type = init->dma_type;
  701 
  702         dev_priv->fb_bpp = init->fb_bpp;
  703         dev_priv->front_offset = init->front_offset;
  704         dev_priv->front_pitch = init->front_pitch;
  705         dev_priv->back_offset = init->back_offset;
  706         dev_priv->back_pitch = init->back_pitch;
  707         dev_priv->depth_bpp = init->depth_bpp;
  708         dev_priv->depth_offset = init->depth_offset;
  709         dev_priv->depth_pitch = init->depth_pitch;
  710 
  711         dev_priv->texture_offset = init->texture_offset;
  712         dev_priv->texture_size = init->texture_size;
  713 
  714         DRM_GETSAREA();
  715         if (!dev_priv->sarea) {
  716                 DRM_ERROR("could not find sarea!\n");
  717                 savage_do_cleanup_bci(dev);
  718                 return DRM_ERR(EINVAL);
  719         }
  720         if (init->status_offset != 0) {
  721                 dev_priv->status = drm_core_findmap(dev, init->status_offset);
  722                 if (!dev_priv->status) {
  723                         DRM_ERROR("could not find shadow status region!\n");
  724                         savage_do_cleanup_bci(dev);
  725                         return DRM_ERR(EINVAL);
  726                 }
  727         } else {
  728                 dev_priv->status = NULL;
  729         }
  730         if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
  731                 dev->agp_buffer_map = drm_core_findmap(dev,
  732                                                        init->buffers_offset);
  733                 if (!dev->agp_buffer_map) {
  734                         DRM_ERROR("could not find DMA buffer region!\n");
  735                         savage_do_cleanup_bci(dev);
  736                         return DRM_ERR(EINVAL);
  737                 }
  738                 drm_core_ioremap(dev->agp_buffer_map, dev);
  739                 if (!dev->agp_buffer_map) {
  740                         DRM_ERROR("failed to ioremap DMA buffer region!\n");
  741                         savage_do_cleanup_bci(dev);
  742                         return DRM_ERR(ENOMEM);
  743                 }
  744         }
  745         if (init->agp_textures_offset) {
  746                 dev_priv->agp_textures =
  747                         drm_core_findmap(dev, init->agp_textures_offset);
  748                 if (!dev_priv->agp_textures) {
  749                         DRM_ERROR("could not find agp texture region!\n");
  750                         savage_do_cleanup_bci(dev);
  751                         return DRM_ERR(EINVAL);
  752                 }
  753         } else {
  754                 dev_priv->agp_textures = NULL;
  755         }
  756 
  757         if (init->cmd_dma_offset) {
  758                 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
  759                         DRM_ERROR("command DMA not supported on "
  760                                   "Savage3D/MX/IX.\n");
  761                         savage_do_cleanup_bci(dev);
  762                         return DRM_ERR(EINVAL);
  763                 }
  764                 if (dev->dma && dev->dma->buflist) {
  765                         DRM_ERROR("command and vertex DMA not supported "
  766                                   "at the same time.\n");
  767                         savage_do_cleanup_bci(dev);
  768                         return DRM_ERR(EINVAL);
  769                 }
  770                 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
  771                 if (!dev_priv->cmd_dma) {
  772                         DRM_ERROR("could not find command DMA region!\n");
  773                         savage_do_cleanup_bci(dev);
  774                         return DRM_ERR(EINVAL);
  775                 }
  776                 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
  777                         if (dev_priv->cmd_dma->type != _DRM_AGP) {
  778                                 DRM_ERROR("AGP command DMA region is not a "
  779                                           "_DRM_AGP map!\n");
  780                                 savage_do_cleanup_bci(dev);
  781                                 return DRM_ERR(EINVAL);
  782                         }
  783                         drm_core_ioremap(dev_priv->cmd_dma, dev);
  784                         if (!dev_priv->cmd_dma->handle) {
  785                                 DRM_ERROR("failed to ioremap command "
  786                                           "DMA region!\n");
  787                                 savage_do_cleanup_bci(dev);
  788                                 return DRM_ERR(ENOMEM);
  789                         }
  790                 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
  791                         DRM_ERROR("PCI command DMA region is not a "
  792                                   "_DRM_CONSISTENT map!\n");
  793                         savage_do_cleanup_bci(dev);
  794                         return DRM_ERR(EINVAL);
  795                 }
  796         } else {
  797                 dev_priv->cmd_dma = NULL;
  798         }
  799 
  800         dev_priv->dma_flush = savage_dma_flush;
  801         if (!dev_priv->cmd_dma) {
  802                 DRM_DEBUG("falling back to faked command DMA.\n");
  803                 dev_priv->fake_dma.offset = 0;
  804                 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
  805                 dev_priv->fake_dma.type = _DRM_SHM;
  806                 dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
  807                                                       DRM_MEM_DRIVER);
  808                 if (!dev_priv->fake_dma.handle) {
  809                         DRM_ERROR("could not allocate faked DMA buffer!\n");
  810                         savage_do_cleanup_bci(dev);
  811                         return DRM_ERR(ENOMEM);
  812                 }
  813                 dev_priv->cmd_dma = &dev_priv->fake_dma;
  814                 dev_priv->dma_flush = savage_fake_dma_flush;
  815         }
  816 
  817         dev_priv->sarea_priv =
  818                 (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
  819                                        init->sarea_priv_offset);
  820 
  821         /* setup bitmap descriptors */
  822         {
  823                 unsigned int color_tile_format;
  824                 unsigned int depth_tile_format;
  825                 unsigned int front_stride, back_stride, depth_stride;
  826                 if (dev_priv->chipset <= S3_SAVAGE4) {
  827                         color_tile_format = dev_priv->fb_bpp == 16 ?
  828                                 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
  829                         depth_tile_format = dev_priv->depth_bpp == 16 ?
  830                                 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
  831                 } else {
  832                         color_tile_format = SAVAGE_BD_TILE_DEST;
  833                         depth_tile_format = SAVAGE_BD_TILE_DEST;
  834                 }
  835                 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
  836                 back_stride  = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
  837                 depth_stride = 
  838                     dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
  839 
  840                 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
  841                         (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
  842                         (color_tile_format << SAVAGE_BD_TILE_SHIFT);
  843 
  844                 dev_priv-> back_bd =  back_stride | SAVAGE_BD_BW_DISABLE |
  845                         (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
  846                         (color_tile_format << SAVAGE_BD_TILE_SHIFT);
  847 
  848                 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
  849                         (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
  850                         (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
  851         }
  852 
  853         /* setup status and bci ptr */
  854         dev_priv->event_counter = 0;
  855         dev_priv->event_wrap = 0;
  856         dev_priv->bci_ptr = (volatile uint32_t *)
  857             ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
  858         if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
  859                 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
  860         } else {
  861                 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
  862         }
  863         if (dev_priv->status != NULL) {
  864                 dev_priv->status_ptr =
  865                         (volatile uint32_t *)dev_priv->status->handle;
  866                 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
  867                 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
  868                 dev_priv->status_ptr[1023] = dev_priv->event_counter;
  869         } else {
  870                 dev_priv->status_ptr = NULL;
  871                 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
  872                         dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
  873                 } else {
  874                         dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
  875                 }
  876                 dev_priv->wait_evnt = savage_bci_wait_event_reg;
  877         }
  878 
  879         /* cliprect functions */
  880         if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
  881                 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
  882         else
  883                 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
  884 
  885         if (savage_freelist_init(dev) < 0) {
  886                 DRM_ERROR("could not initialize freelist\n");
  887                 savage_do_cleanup_bci(dev);
  888                 return DRM_ERR(ENOMEM);
  889         }
  890 
  891         if (savage_dma_init(dev_priv) <  0) {
  892                 DRM_ERROR("could not initialize command DMA\n");
  893                 savage_do_cleanup_bci(dev);
  894                 return DRM_ERR(ENOMEM);
  895         }
  896 
  897         return 0;
  898 }
  899 
/* Tear down everything set up by savage_do_init_bci().  Also used as the
 * error-unwind path of the init function, so it must tolerate a partially
 * initialized dev_priv: every release below is guarded by the state the
 * init path would have left behind.  Always returns 0.
 */
int savage_do_cleanup_bci(drm_device_t *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        /* Command DMA is either the kernel-allocated fake buffer (PCI /
         * fallback path, identified by address identity with fake_dma) or
         * an ioremapped AGP map that must be unmapped. */
        if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
                if (dev_priv->fake_dma.handle)
                        drm_free(dev_priv->fake_dma.handle,
                                 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
        } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
                   dev_priv->cmd_dma->type == _DRM_AGP &&
                   dev_priv->dma_type == SAVAGE_DMA_AGP)
                drm_core_ioremapfree(dev_priv->cmd_dma, dev);

        if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
            dev->agp_buffer_map && dev->agp_buffer_map->handle) {
                drm_core_ioremapfree(dev->agp_buffer_map, dev);
                /* make sure the next instance (which may be running
                 * in PCI mode) doesn't try to use an old
                 * agp_buffer_map. */
                dev->agp_buffer_map = NULL;
        }

        /* Free the page table built by savage_dma_init() (if it ran). */
        if (dev_priv->dma_pages)
                drm_free(dev_priv->dma_pages,
                         sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
                         DRM_MEM_DRIVER);

        return 0;
}
  929 
  930 static int savage_bci_init(DRM_IOCTL_ARGS)
  931 {
  932         DRM_DEVICE;
  933         drm_savage_init_t init;
  934 
  935         LOCK_TEST_WITH_RETURN(dev, filp);
  936 
  937         DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
  938                                  sizeof(init));
  939 
  940         switch (init.func) {
  941         case SAVAGE_INIT_BCI:
  942                 return savage_do_init_bci(dev, &init);
  943         case SAVAGE_CLEANUP_BCI:
  944                 return savage_do_cleanup_bci(dev);
  945         }
  946 
  947         return DRM_ERR(EINVAL);
  948 }
  949 
  950 static int savage_bci_event_emit(DRM_IOCTL_ARGS)
  951 {
  952         DRM_DEVICE;
  953         drm_savage_private_t *dev_priv = dev->dev_private;
  954         drm_savage_event_emit_t event;
  955 
  956         DRM_DEBUG("\n");
  957 
  958         LOCK_TEST_WITH_RETURN(dev, filp);
  959 
  960         DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
  961                                  sizeof(event));
  962 
  963         event.count = savage_bci_emit_event(dev_priv, event.flags);
  964         event.count |= dev_priv->event_wrap << 16;
  965         DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *)data,
  966                                event, sizeof(event));
  967         return 0;
  968 }
  969 
/* DRM_SAVAGE_BCI_EVENT_WAIT ioctl: block until the hardware has passed a
 * previously emitted event stamp (as returned by the event-emit ioctl:
 * low 16 bits = event counter, high 16 bits = wrap count).  Purely reads
 * status, hence no LOCK_TEST_WITH_RETURN here.
 */
static int savage_bci_event_wait(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_event_wait_t event;
        unsigned int event_e, hw_e;    /* event counters (low 16 bits) */
        unsigned int event_w, hw_w;    /* wrap counts (high 16 bits) */

        DRM_DEBUG("\n");

        DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
                                 sizeof(event));

        UPDATE_EVENT_COUNTER();
        /* Read the hardware's current event counter, from the shadow
         * status page when available, otherwise from the status register. */
        if (dev_priv->status_ptr)
                hw_e = dev_priv->status_ptr[1] & 0xffff;
        else
                hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        hw_w = dev_priv->event_wrap;
        if (hw_e > dev_priv->event_counter)
                hw_w--; /* hardware hasn't passed the last wrap yet */

        event_e = event.count & 0xffff;
        event_w = event.count >> 16;

        /* Don't need to wait if
         * - event counter wrapped since the event was emitted or
         * - the hardware has advanced up to or over the event to wait for.
         */
        if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
                return 0;
        else
                return dev_priv->wait_evnt(dev_priv, event_e);
}
 1004 
 1005 /*
 1006  * DMA buffer management
 1007  */
 1008 
 1009 static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
 1010 {
 1011         drm_buf_t *buf;
 1012         int i;
 1013 
 1014         for (i = d->granted_count; i < d->request_count; i++) {
 1015                 buf = savage_freelist_get(dev);
 1016                 if (!buf)
 1017                         return DRM_ERR(EAGAIN);
 1018 
 1019                 buf->filp = filp;
 1020 
 1021                 if (DRM_COPY_TO_USER(&d->request_indices[i],
 1022                                      &buf->idx, sizeof(buf->idx)))
 1023                         return DRM_ERR(EFAULT);
 1024                 if (DRM_COPY_TO_USER(&d->request_sizes[i],
 1025                                      &buf->total, sizeof(buf->total)))
 1026                         return DRM_ERR(EFAULT);
 1027 
 1028                 d->granted_count++;
 1029         }
 1030         return 0;
 1031 }
 1032 
 1033 int savage_bci_buffers(DRM_IOCTL_ARGS)
 1034 {
 1035         DRM_DEVICE;
 1036         drm_device_dma_t *dma = dev->dma;
 1037         drm_dma_t d;
 1038         int ret = 0;
 1039 
 1040         LOCK_TEST_WITH_RETURN(dev, filp);
 1041 
 1042         DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
 1043 
 1044         /* Please don't send us buffers.
 1045          */
 1046         if (d.send_count != 0) {
 1047                 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
 1048                           DRM_CURRENTPID, d.send_count);
 1049                 return DRM_ERR(EINVAL);
 1050         }
 1051 
 1052         /* We'll send you buffers.
 1053          */
 1054         if (d.request_count < 0 || d.request_count > dma->buf_count) {
 1055                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
 1056                           DRM_CURRENTPID, d.request_count, dma->buf_count);
 1057                 return DRM_ERR(EINVAL);
 1058         }
 1059 
 1060         d.granted_count = 0;
 1061 
 1062         if (d.request_count) {
 1063                 ret = savage_bci_get_buffers(filp, dev, &d);
 1064         }
 1065 
 1066         DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
 1067 
 1068         return ret;
 1069 }
 1070 
 1071 void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
 1072 {
 1073         drm_device_dma_t *dma = dev->dma;
 1074         drm_savage_private_t *dev_priv = dev->dev_private;
 1075         int i;
 1076 
 1077         if (!dma)
 1078                 return;
 1079         if (!dev_priv)
 1080                 return;
 1081         if (!dma->buflist)
 1082                 return;
 1083 
 1084         /*i830_flush_queue(dev);*/
 1085 
 1086         for (i = 0; i < dma->buf_count; i++) {
 1087                 drm_buf_t *buf = dma->buflist[i];
 1088                 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
 1089 
 1090                 if (buf->filp == filp && buf_priv &&
 1091                     buf_priv->next == NULL && buf_priv->prev == NULL) {
 1092                         uint16_t event;
 1093                         DRM_DEBUG("reclaimed from client\n");
 1094                         event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
 1095                         SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
 1096                         savage_freelist_put(dev, buf);
 1097                 }
 1098         }
 1099 
 1100         drm_core_reclaim_buffers(dev, filp);
 1101 }
 1102 
/* Driver-private ioctl dispatch table, indexed by ioctl number.
 * BCI_INIT may only be issued by the root-privileged DRM master (the X
 * server); the remaining ioctls require an authenticated client. */
drm_ioctl_desc_t savage_ioctls[] = {
        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
};

/* Number of entries in savage_ioctls, exported to the DRM core. */
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
 1111 

Cache object: 2a146c96245bb23938e1e061a7eb67cb


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.