FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/ttm/ttm_execbuf_util.c


/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_execbuf_util.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

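/*
 * Undo the reservations held on the buffers in the list: return any
 * buffers that were taken off the LRU lists back to them, clear the
 * per-entry state and wake up anyone waiting on a reservation.
 * Called with the global LRU lock held.
 */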
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (entry->removed) {
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
                entry->reserved = false;
                atomic_set(&bo->reserved, 0);
                wakeup(bo);
        }
}

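/*
 * Take all reserved buffers on the list off their LRU lists, recording
 * in put_count the number of list references to drop later via
 * ttm_eu_list_ref_sub().  Called with the global LRU lock held.
 */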
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

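/*
 * Drop the buffer references recorded in put_count by
 * ttm_eu_del_from_lru_locked() and reset the counters.
 */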
static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

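/*
 * Release the reservations on all buffers in the list under the
 * global LRU lock of the first buffer's ttm_bo_global.
 */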
void ttm_eu_backoff_reservation(struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        mtx_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list);
        mtx_unlock(&glob->lru_lock);
}

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
        uint32_t val_seq;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        mtx_lock(&glob->lru_lock);
        val_seq = entry->bo->bdev->val_seq++;

retry_locked:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                /* already slowpath reserved? */
                if (entry->reserved)
                        continue;

                ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        ttm_eu_del_from_lru_locked(list);
                        ret = ttm_bo_reserve_nolru(bo, true, false,
                                                   true, val_seq);
                        if (!ret)
                                break;

                        if (unlikely(ret != -EAGAIN))
                                goto err;

                        /* fallthrough */
                case -EAGAIN:
                        ttm_eu_backoff_reservation_locked(list);

                        /*
                         * temporarily increase sequence number every retry,
                         * to prevent us from seeing our old reservation
                         * sequence when someone else reserved the buffer,
                         * but hasn't updated the seq_valid/seqno members yet.
                         */
                        val_seq = entry->bo->bdev->val_seq++;

                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
                        if (unlikely(ret != 0)) {
                                mtx_unlock(&glob->lru_lock);
                                return ret;
                        }
                        entry->reserved = true;
                        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                                ret = -EBUSY;
                                goto err;
                        }
                        goto retry_locked;
                default:
                        goto err;
                }

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ret = -EBUSY;
                        goto err;
                }
        }

        ttm_eu_del_from_lru_locked(list);
        mtx_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);

        return 0;

err:
        ttm_eu_backoff_reservation_locked(list);
        mtx_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);
        return ret;
}

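/*
 * Attach the given sync (fence) object to every buffer on the list and
 * unreserve the buffers.  The previous sync objects, if any, are
 * unreferenced once the fence and LRU locks have been dropped.
 */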
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        mtx_lock(&glob->lru_lock);
        mtx_lock(&bdev->fence_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
        mtx_unlock(&bdev->fence_lock);
        mtx_unlock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}
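
For context, below is a minimal sketch of how a driver's command-submission path might drive these helpers: build a list of ttm_validate_buffer entries, reserve them, do driver-specific validation and submission, then fence and unreserve them. The buffer array, the fence object and example_validate_and_submit() are hypothetical placeholders and are not part of this file.

/*
 * Hypothetical submission path; example_validate_and_submit() stands in
 * for driver-specific placement validation and ring submission.
 */
static int
example_submit(struct ttm_validate_buffer *bufs, int count, void *fence)
{
        struct list_head list;
        int i, ret;

        INIT_LIST_HEAD(&list);
        for (i = 0; i < count; i++)
                list_add_tail(&bufs[i].head, &list);

        /* Reserve every buffer; contention back-off is handled internally. */
        ret = ttm_eu_reserve_buffers(&list);
        if (ret != 0)
                return ret;

        /* Driver-specific validation and command submission. */
        ret = example_validate_and_submit(&list);
        if (ret != 0) {
                /* Drop the reservations if nothing was submitted. */
                ttm_eu_backoff_reservation(&list);
                return ret;
        }

        /* Attach the fence and unreserve all buffers in one pass. */
        ttm_eu_fence_buffer_objects(&list, fence);
        return 0;
}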
