FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/ttm/ttm_bo.c


    1 /**************************************************************************
    2  *
    3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
    4  * All Rights Reserved.
    5  *
    6  * Permission is hereby granted, free of charge, to any person obtaining a
    7  * copy of this software and associated documentation files (the
    8  * "Software"), to deal in the Software without restriction, including
    9  * without limitation the rights to use, copy, modify, merge, publish,
   10  * distribute, sub license, and/or sell copies of the Software, and to
   11  * permit persons to whom the Software is furnished to do so, subject to
   12  * the following conditions:
   13  *
   14  * The above copyright notice and this permission notice (including the
   15  * next paragraph) shall be included in all copies or substantial portions
   16  * of the Software.
   17  *
   18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
   21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
   22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
   23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
   24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
   25  *
   26  **************************************************************************/
   27 /*
   28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include <dev/drm2/drmP.h>
   35 #include <dev/drm2/ttm/ttm_module.h>
   36 #include <dev/drm2/ttm/ttm_bo_driver.h>
   37 #include <dev/drm2/ttm/ttm_placement.h>
   38 #include <vm/vm_pageout.h>
   39 
   40 #define TTM_ASSERT_LOCKED(param)
   41 #define TTM_DEBUG(fmt, arg...)
   42 #define TTM_BO_HASH_ORDER 13
   43 
   44 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
   45 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
   46 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
   47 
   48 MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
   49 
   50 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
   51 {
   52         int i;
   53 
   54         for (i = 0; i <= TTM_PL_PRIV5; i++)
   55                 if (flags & (1 << i)) {
   56                         *mem_type = i;
   57                         return 0;
   58                 }
   59         return -EINVAL;
   60 }
   61 
   62 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
   63 {
   64         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
   65 
   66         printf("    has_type: %d\n", man->has_type);
   67         printf("    use_type: %d\n", man->use_type);
   68         printf("    flags: 0x%08X\n", man->flags);
   69         printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
   70         printf("    size: %ju\n", (uintmax_t)man->size);
   71         printf("    available_caching: 0x%08X\n", man->available_caching);
   72         printf("    default_caching: 0x%08X\n", man->default_caching);
   73         if (mem_type != TTM_PL_SYSTEM)
   74                 (*man->func->debug)(man, TTM_PFX);
   75 }
   76 
   77 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
   78                                         struct ttm_placement *placement)
   79 {
   80         int i, ret, mem_type;
   81 
   82         printf("No space for %p (%lu pages, %luK, %luM)\n",
   83                bo, bo->mem.num_pages, bo->mem.size >> 10,
   84                bo->mem.size >> 20);
   85         for (i = 0; i < placement->num_placement; i++) {
   86                 ret = ttm_mem_type_from_flags(placement->placement[i],
   87                                                 &mem_type);
   88                 if (ret)
   89                         return;
   90                 printf("  placement[%d]=0x%08X (%d)\n",
   91                        i, placement->placement[i], mem_type);
   92                 ttm_mem_type_debug(bo->bdev, mem_type);
   93         }
   94 }
   95 
   96 #if 0
   97 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
   98     char *buffer)
   99 {
  100 
  101         return snprintf(buffer, PAGE_SIZE, "%lu\n",
  102                         (unsigned long) atomic_read(&glob->bo_count));
  103 }
  104 #endif
  105 
  106 static inline uint32_t ttm_bo_type_flags(unsigned type)
  107 {
  108         return 1 << (type);
  109 }
  110 
  111 static void ttm_bo_release_list(struct ttm_buffer_object *bo)
  112 {
  113         struct ttm_bo_device *bdev = bo->bdev;
  114         size_t acc_size = bo->acc_size;
  115 
  116         MPASS(atomic_read(&bo->list_kref) == 0);
  117         MPASS(atomic_read(&bo->kref) == 0);
  118         MPASS(atomic_read(&bo->cpu_writers) == 0);
  119         MPASS(bo->sync_obj == NULL);
  120         MPASS(bo->mem.mm_node == NULL);
  121         MPASS(list_empty(&bo->lru));
  122         MPASS(list_empty(&bo->ddestroy));
  123 
  124         if (bo->ttm)
  125                 ttm_tt_destroy(bo->ttm);
  126         atomic_dec(&bo->glob->bo_count);
  127         if (bo->destroy)
  128                 bo->destroy(bo);
  129         else {
  130                 free(bo, M_TTM_BO);
  131         }
  132         ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
  133 }
  134 
  135 static int
  136 ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
  137 {
  138         const char *wmsg;
  139         int flags, ret;
  140 
  141         ret = 0;
  142         if (interruptible) {
  143                 flags = PCATCH;
  144                 wmsg = "ttbowi";
  145         } else {
  146                 flags = 0;
  147                 wmsg = "ttbowu";
  148         }
  149         while (ttm_bo_is_reserved(bo)) {
  150                 ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
  151                 if (ret == -EINTR || ret == -ERESTART)
  152                         ret = -ERESTARTSYS;
  153                 if (ret != 0)
  154                         break;
  155         }
  156         return (ret);
  157 }
  158 
  159 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  160 {
  161         struct ttm_bo_device *bdev = bo->bdev;
  162         struct ttm_mem_type_manager *man;
  163 
  164         MPASS(ttm_bo_is_reserved(bo));
  165 
  166         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  167 
  168                 MPASS(list_empty(&bo->lru));
  169 
  170                 man = &bdev->man[bo->mem.mem_type];
  171                 list_add_tail(&bo->lru, &man->lru);
  172                 refcount_acquire(&bo->list_kref);
  173 
  174                 if (bo->ttm != NULL) {
  175                         list_add_tail(&bo->swap, &bo->glob->swap_lru);
  176                         refcount_acquire(&bo->list_kref);
  177                 }
  178         }
  179 }
  180 
  181 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
  182 {
  183         int put_count = 0;
  184 
  185         if (!list_empty(&bo->swap)) {
  186                 list_del_init(&bo->swap);
  187                 ++put_count;
  188         }
  189         if (!list_empty(&bo->lru)) {
  190                 list_del_init(&bo->lru);
  191                 ++put_count;
  192         }
  193 
  194         /*
  195          * TODO: Add a driver hook to delete from
  196          * driver-specific LRU's here.
  197          */
  198 
  199         return put_count;
  200 }
  201 
  202 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
  203                           bool interruptible,
  204                           bool no_wait, bool use_sequence, uint32_t sequence)
  205 {
  206         int ret;
  207 
  208         while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
  209                 /**
  210                  * Deadlock avoidance for multi-bo reserving.
  211                  */
  212                 if (use_sequence && bo->seq_valid) {
  213                         /**
  214                          * We've already reserved this one.
  215                          */
  216                         if (unlikely(sequence == bo->val_seq))
  217                                 return -EDEADLK;
  218                         /**
  219                          * Already reserved by a thread that will not back
  220                          * off for us. We need to back off.
  221                          */
  222                         if (unlikely(sequence - bo->val_seq < (1U << 31)))
  223                                 return -EAGAIN;
  224                 }
  225 
  226                 if (no_wait)
  227                         return -EBUSY;
  228 
  229                 ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
  230 
  231                 if (unlikely(ret))
  232                         return ret;
  233         }
  234 
  235         if (use_sequence) {
  236                 bool wake_up = false;
  237                 /**
  238                  * Wake up waiters that may need to recheck for deadlock,
  239                  * if we decreased the sequence number.
  240                  */
  241                 if (unlikely((bo->val_seq - sequence < (1U << 31))
  242                              || !bo->seq_valid))
  243                         wake_up = true;
  244 
  245                 /*
   246                  * In the worst case with memory ordering these values can be
   247                  * seen in the wrong order. However, since we wake up all
   248                  * waiters in that case, this will hopefully not pose a problem,
   249                  * and the worst case would only cause someone to accidentally
   250                  * hit -EAGAIN in ttm_bo_reserve when they see an old value of
   251                  * val_seq. However, this would only happen if seq_valid was
   252                  * written before val_seq was, and just means some slightly
   253                  * increased cpu usage.
  254                  */
  255                 bo->val_seq = sequence;
  256                 bo->seq_valid = true;
  257                 if (wake_up)
  258                         wakeup(bo);
  259         } else {
  260                 bo->seq_valid = false;
  261         }
  262 
  263         return 0;
  264 }
  265 
  266 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
  267                          bool never_free)
  268 {
  269         u_int old;
  270 
  271         old = atomic_fetchadd_int(&bo->list_kref, -count);
  272         if (old <= count) {
  273                 if (never_free)
  274                         panic("ttm_bo_ref_buf");
  275                 ttm_bo_release_list(bo);
  276         }
  277 }
  278 
  279 int ttm_bo_reserve(struct ttm_buffer_object *bo,
  280                    bool interruptible,
  281                    bool no_wait, bool use_sequence, uint32_t sequence)
  282 {
  283         struct ttm_bo_global *glob = bo->glob;
  284         int put_count = 0;
  285         int ret;
  286 
  287         mtx_lock(&bo->glob->lru_lock);
  288         ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
  289                                    sequence);
  290         if (likely(ret == 0)) {
  291                 put_count = ttm_bo_del_from_lru(bo);
  292                 mtx_unlock(&glob->lru_lock);
  293                 ttm_bo_list_ref_sub(bo, put_count, true);
  294         } else
  295                 mtx_unlock(&bo->glob->lru_lock);
  296 
  297         return ret;
  298 }
  299 
  300 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
  301                                   bool interruptible, uint32_t sequence)
  302 {
  303         bool wake_up = false;
  304         int ret;
  305 
  306         while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
  307                 if (bo->seq_valid && sequence == bo->val_seq) {
  308                         DRM_ERROR(
  309                             "%s: bo->seq_valid && sequence == bo->val_seq",
  310                             __func__);
  311                 }
  312 
  313                 ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
  314 
  315                 if (unlikely(ret))
  316                         return ret;
  317         }
  318 
  319         if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
  320                 wake_up = true;
  321 
  322         /**
  323          * Wake up waiters that may need to recheck for deadlock,
  324          * if we decreased the sequence number.
  325          */
  326         bo->val_seq = sequence;
  327         bo->seq_valid = true;
  328         if (wake_up)
  329                 wakeup(bo);
  330 
  331         return 0;
  332 }
  333 
  334 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  335                             bool interruptible, uint32_t sequence)
  336 {
  337         struct ttm_bo_global *glob = bo->glob;
  338         int put_count, ret;
  339 
  340         mtx_lock(&glob->lru_lock);
  341         ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
  342         if (likely(!ret)) {
  343                 put_count = ttm_bo_del_from_lru(bo);
  344                 mtx_unlock(&glob->lru_lock);
  345                 ttm_bo_list_ref_sub(bo, put_count, true);
  346         } else
  347                 mtx_unlock(&glob->lru_lock);
  348         return ret;
  349 }
  350 
  351 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
  352 {
  353         ttm_bo_add_to_lru(bo);
  354         atomic_set(&bo->reserved, 0);
  355         wakeup(bo);
  356 }
  357 
  358 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
  359 {
  360         struct ttm_bo_global *glob = bo->glob;
  361 
  362         mtx_lock(&glob->lru_lock);
  363         ttm_bo_unreserve_locked(bo);
  364         mtx_unlock(&glob->lru_lock);
  365 }
  366 
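/*
 * Illustrative sketch (hypothetical helper; the full version of this dance
 * lives in ttm_eu_reserve_buffers() in ttm_execbuf_util.c): how a caller
 * might reserve two buffer objects under one validation sequence so that
 * the deadlock-avoidance logic in ttm_bo_reserve_nolru() can order
 * competing threads.  The -EAGAIN backoff and retry policy shown here is
 * only an example.
 */
static __unused int
ttm_example_reserve_pair(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b, uint32_t val_seq)
{
        int ret;

retry:
        ret = ttm_bo_reserve(a, true, false, true, val_seq);
        if (ret != 0)
                return (ret);

        ret = ttm_bo_reserve(b, true, false, true, val_seq);
        if (ret == -EAGAIN) {
                /*
                 * 'b' is held by a thread with an older sequence that will
                 * not back off for us: release 'a', wait for 'b' to become
                 * free in the slowpath, then start over.
                 */
                ttm_bo_unreserve(a);
                ret = ttm_bo_reserve_slowpath(b, true, val_seq);
                if (ret != 0)
                        return (ret);
                ttm_bo_unreserve(b);
                goto retry;
        }
        if (ret != 0) {
                ttm_bo_unreserve(a);
                return (ret);
        }
        return (0);
}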
  367 /*
   368  * Called with bo->mutex locked.
  369  */
  370 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
  371 {
  372         struct ttm_bo_device *bdev = bo->bdev;
  373         struct ttm_bo_global *glob = bo->glob;
  374         int ret = 0;
  375         uint32_t page_flags = 0;
  376 
  377         TTM_ASSERT_LOCKED(&bo->mutex);
  378         bo->ttm = NULL;
  379 
  380         if (bdev->need_dma32)
  381                 page_flags |= TTM_PAGE_FLAG_DMA32;
  382 
  383         switch (bo->type) {
  384         case ttm_bo_type_device:
  385                 if (zero_alloc)
  386                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
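                       /* FALLTHROUGH */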
  387         case ttm_bo_type_kernel:
  388                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  389                                                       page_flags, glob->dummy_read_page);
  390                 if (unlikely(bo->ttm == NULL))
  391                         ret = -ENOMEM;
  392                 break;
  393         case ttm_bo_type_sg:
  394                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  395                                                       page_flags | TTM_PAGE_FLAG_SG,
  396                                                       glob->dummy_read_page);
  397                 if (unlikely(bo->ttm == NULL)) {
  398                         ret = -ENOMEM;
  399                         break;
  400                 }
  401                 bo->ttm->sg = bo->sg;
  402                 break;
  403         default:
  404                 printf("[TTM] Illegal buffer object type\n");
  405                 ret = -EINVAL;
  406                 break;
  407         }
  408 
  409         return ret;
  410 }
  411 
  412 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
  413                                   struct ttm_mem_reg *mem,
  414                                   bool evict, bool interruptible,
  415                                   bool no_wait_gpu)
  416 {
  417         struct ttm_bo_device *bdev = bo->bdev;
  418         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  419         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
  420         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
  421         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
  422         int ret = 0;
  423 
  424         if (old_is_pci || new_is_pci ||
  425             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
  426                 ret = ttm_mem_io_lock(old_man, true);
  427                 if (unlikely(ret != 0))
  428                         goto out_err;
  429                 ttm_bo_unmap_virtual_locked(bo);
  430                 ttm_mem_io_unlock(old_man);
  431         }
  432 
  433         /*
  434          * Create and bind a ttm if required.
  435          */
  436 
  437         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  438                 if (bo->ttm == NULL) {
  439                         bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
  440                         ret = ttm_bo_add_ttm(bo, zero);
  441                         if (ret)
  442                                 goto out_err;
  443                 }
  444 
  445                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
  446                 if (ret)
  447                         goto out_err;
  448 
  449                 if (mem->mem_type != TTM_PL_SYSTEM) {
  450                         ret = ttm_tt_bind(bo->ttm, mem);
  451                         if (ret)
  452                                 goto out_err;
  453                 }
  454 
  455                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
  456                         if (bdev->driver->move_notify)
  457                                 bdev->driver->move_notify(bo, mem);
  458                         bo->mem = *mem;
  459                         mem->mm_node = NULL;
  460                         goto moved;
  461                 }
  462         }
  463 
  464         if (bdev->driver->move_notify)
  465                 bdev->driver->move_notify(bo, mem);
  466 
  467         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
  468             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
  469                 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
  470         else if (bdev->driver->move)
  471                 ret = bdev->driver->move(bo, evict, interruptible,
  472                                          no_wait_gpu, mem);
  473         else
  474                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
  475 
  476         if (ret) {
  477                 if (bdev->driver->move_notify) {
  478                         struct ttm_mem_reg tmp_mem = *mem;
  479                         *mem = bo->mem;
  480                         bo->mem = tmp_mem;
  481                         bdev->driver->move_notify(bo, mem);
  482                         bo->mem = *mem;
  483                         *mem = tmp_mem;
  484                 }
  485 
  486                 goto out_err;
  487         }
  488 
  489 moved:
  490         if (bo->evicted) {
  491                 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
  492                 if (ret)
  493                         printf("[TTM] Can not flush read caches\n");
  494                 bo->evicted = false;
  495         }
  496 
  497         if (bo->mem.mm_node) {
  498                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
  499                     bdev->man[bo->mem.mem_type].gpu_offset;
  500                 bo->cur_placement = bo->mem.placement;
  501         } else
  502                 bo->offset = 0;
  503 
  504         return 0;
  505 
  506 out_err:
  507         new_man = &bdev->man[bo->mem.mem_type];
  508         if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
  509                 ttm_tt_unbind(bo->ttm);
  510                 ttm_tt_destroy(bo->ttm);
  511                 bo->ttm = NULL;
  512         }
  513 
  514         return ret;
  515 }
  516 
  517 /**
   518  * Called with the bo::reserved lock held.
  519  * Will release GPU memory type usage on destruction.
  520  * This is the place to put in driver specific hooks to release
  521  * driver private resources.
  522  * Will release the bo::reserved lock.
  523  */
  524 
  525 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
  526 {
  527         if (bo->bdev->driver->move_notify)
  528                 bo->bdev->driver->move_notify(bo, NULL);
  529 
  530         if (bo->ttm) {
  531                 ttm_tt_unbind(bo->ttm);
  532                 ttm_tt_destroy(bo->ttm);
  533                 bo->ttm = NULL;
  534         }
  535         ttm_bo_mem_put(bo, &bo->mem);
  536 
  537         atomic_set(&bo->reserved, 0);
   538         wakeup(bo);
  539 
  540         /*
  541          * Since the final reference to this bo may not be dropped by
  542          * the current task we have to put a memory barrier here to make
  543          * sure the changes done in this function are always visible.
  544          *
  545          * This function only needs protection against the final kref_put.
  546          */
  547         mb();
  548 }
  549 
  550 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
  551 {
  552         struct ttm_bo_device *bdev = bo->bdev;
  553         struct ttm_bo_global *glob = bo->glob;
  554         struct ttm_bo_driver *driver = bdev->driver;
  555         void *sync_obj = NULL;
  556         int put_count;
  557         int ret;
  558 
  559         mtx_lock(&glob->lru_lock);
  560         ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
  561 
  562         mtx_lock(&bdev->fence_lock);
  563         (void) ttm_bo_wait(bo, false, false, true);
  564         if (!ret && !bo->sync_obj) {
  565                 mtx_unlock(&bdev->fence_lock);
  566                 put_count = ttm_bo_del_from_lru(bo);
  567 
  568                 mtx_unlock(&glob->lru_lock);
  569                 ttm_bo_cleanup_memtype_use(bo);
  570 
  571                 ttm_bo_list_ref_sub(bo, put_count, true);
  572 
  573                 return;
  574         }
  575         if (bo->sync_obj)
  576                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
  577         mtx_unlock(&bdev->fence_lock);
  578 
  579         if (!ret) {
  580                 atomic_set(&bo->reserved, 0);
  581                 wakeup(bo);
  582         }
  583 
  584         refcount_acquire(&bo->list_kref);
  585         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
  586         mtx_unlock(&glob->lru_lock);
  587 
  588         if (sync_obj) {
  589                 driver->sync_obj_flush(sync_obj);
  590                 driver->sync_obj_unref(&sync_obj);
  591         }
  592         taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
  593             ((hz / 100) < 1) ? 1 : hz / 100);
  594 }
  595 
  596 /**
  597  * function ttm_bo_cleanup_refs_and_unlock
   598  * If the bo is idle, remove it from the delayed-destroy and LRU lists, and unref.
  599  * If not idle, do nothing.
  600  *
  601  * Must be called with lru_lock and reservation held, this function
  602  * will drop both before returning.
  603  *
  604  * @interruptible         Any sleeps should occur interruptibly.
  605  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  606  */
  607 
  608 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
  609                                           bool interruptible,
  610                                           bool no_wait_gpu)
  611 {
  612         struct ttm_bo_device *bdev = bo->bdev;
  613         struct ttm_bo_driver *driver = bdev->driver;
  614         struct ttm_bo_global *glob = bo->glob;
  615         int put_count;
  616         int ret;
  617 
  618         mtx_lock(&bdev->fence_lock);
  619         ret = ttm_bo_wait(bo, false, false, true);
  620 
  621         if (ret && !no_wait_gpu) {
  622                 void *sync_obj;
  623 
  624                 /*
  625                  * Take a reference to the fence and unreserve,
  626                  * at this point the buffer should be dead, so
  627                  * no new sync objects can be attached.
  628                  */
  629                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
  630                 mtx_unlock(&bdev->fence_lock);
  631 
  632                 atomic_set(&bo->reserved, 0);
  633                 wakeup(bo);
  634                 mtx_unlock(&glob->lru_lock);
  635 
  636                 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
  637                 driver->sync_obj_unref(&sync_obj);
  638                 if (ret)
  639                         return ret;
  640 
  641                 /*
   642          * Remove sync_obj with ttm_bo_wait; the wait should be
   643          * finished, and no new sync object should have been added.
  644                  */
  645                 mtx_lock(&bdev->fence_lock);
  646                 ret = ttm_bo_wait(bo, false, false, true);
  647                 mtx_unlock(&bdev->fence_lock);
  648                 if (ret)
  649                         return ret;
  650 
  651                 mtx_lock(&glob->lru_lock);
  652                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
  653 
  654                 /*
  655                  * We raced, and lost, someone else holds the reservation now,
  656                  * and is probably busy in ttm_bo_cleanup_memtype_use.
  657                  *
  658                  * Even if it's not the case, because we finished waiting any
  659                  * delayed destruction would succeed, so just return success
  660                  * here.
  661                  */
  662                 if (ret) {
  663                         mtx_unlock(&glob->lru_lock);
  664                         return 0;
  665                 }
  666         } else
  667                 mtx_unlock(&bdev->fence_lock);
  668 
  669         if (ret || unlikely(list_empty(&bo->ddestroy))) {
  670                 atomic_set(&bo->reserved, 0);
  671                 wakeup(bo);
  672                 mtx_unlock(&glob->lru_lock);
  673                 return ret;
  674         }
  675 
  676         put_count = ttm_bo_del_from_lru(bo);
  677         list_del_init(&bo->ddestroy);
  678         ++put_count;
  679 
  680         mtx_unlock(&glob->lru_lock);
  681         ttm_bo_cleanup_memtype_use(bo);
  682 
  683         ttm_bo_list_ref_sub(bo, put_count, true);
  684 
  685         return 0;
  686 }
  687 
  688 /**
   689  * Traverse the delayed-destroy list, and call ttm_bo_cleanup_refs_and_unlock()
   690  * on all encountered buffers.
  691  */
  692 
  693 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  694 {
  695         struct ttm_bo_global *glob = bdev->glob;
  696         struct ttm_buffer_object *entry = NULL;
  697         int ret = 0;
  698 
  699         mtx_lock(&glob->lru_lock);
  700         if (list_empty(&bdev->ddestroy))
  701                 goto out_unlock;
  702 
  703         entry = list_first_entry(&bdev->ddestroy,
  704                 struct ttm_buffer_object, ddestroy);
  705         refcount_acquire(&entry->list_kref);
  706 
  707         for (;;) {
  708                 struct ttm_buffer_object *nentry = NULL;
  709 
  710                 if (entry->ddestroy.next != &bdev->ddestroy) {
  711                         nentry = list_first_entry(&entry->ddestroy,
  712                                 struct ttm_buffer_object, ddestroy);
  713                         refcount_acquire(&nentry->list_kref);
  714                 }
  715 
  716                 ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
  717                 if (remove_all && ret) {
  718                         ret = ttm_bo_reserve_nolru(entry, false, false,
  719                                                    false, 0);
  720                 }
  721 
  722                 if (!ret)
  723                         ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
  724                                                              !remove_all);
  725                 else
  726                         mtx_unlock(&glob->lru_lock);
  727 
  728                 if (refcount_release(&entry->list_kref))
  729                         ttm_bo_release_list(entry);
  730                 entry = nentry;
  731 
  732                 if (ret || !entry)
  733                         goto out;
  734 
  735                 mtx_lock(&glob->lru_lock);
  736                 if (list_empty(&entry->ddestroy))
  737                         break;
  738         }
  739 
  740 out_unlock:
  741         mtx_unlock(&glob->lru_lock);
  742 out:
  743         if (entry && refcount_release(&entry->list_kref))
  744                 ttm_bo_release_list(entry);
  745         return ret;
  746 }
  747 
  748 static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
  749 {
  750         struct ttm_bo_device *bdev = arg;
  751 
  752         if (ttm_bo_delayed_delete(bdev, false)) {
  753                 taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
  754                     ((hz / 100) < 1) ? 1 : hz / 100);
  755         }
  756 }
  757 
  758 static void ttm_bo_release(struct ttm_buffer_object *bo)
  759 {
  760         struct ttm_bo_device *bdev = bo->bdev;
  761         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
  762 
  763         rw_wlock(&bdev->vm_lock);
  764         if (likely(bo->vm_node != NULL)) {
  765                 RB_REMOVE(ttm_bo_device_buffer_objects,
  766                     &bdev->addr_space_rb, bo);
  767                 drm_mm_put_block(bo->vm_node);
  768                 bo->vm_node = NULL;
  769         }
  770         rw_wunlock(&bdev->vm_lock);
  771         ttm_mem_io_lock(man, false);
  772         ttm_mem_io_free_vm(bo);
  773         ttm_mem_io_unlock(man);
  774         ttm_bo_cleanup_refs_or_queue(bo);
  775         if (refcount_release(&bo->list_kref))
  776                 ttm_bo_release_list(bo);
  777 }
  778 
  779 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
  780 {
  781         struct ttm_buffer_object *bo = *p_bo;
  782 
  783         *p_bo = NULL;
  784         if (refcount_release(&bo->kref))
  785                 ttm_bo_release(bo);
  786 }
  787 
  788 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
  789 {
  790         int pending;
  791 
  792         if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending))
  793                 taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
  794         return (pending);
  795 }
  796 
  797 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
  798 {
  799         if (resched) {
  800                 taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
  801                     ((hz / 100) < 1) ? 1 : hz / 100);
  802         }
  803 }
  804 
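/*
 * Illustrative sketch (hypothetical quiesce path): drivers bracket work
 * that must not race with delayed buffer destruction, e.g. suspend, by
 * stopping and restarting the delayed-delete workqueue with the pair of
 * helpers above.
 */
static __unused void
ttm_example_quiesce(struct ttm_bo_device *bdev)
{
        int resched;

        resched = ttm_bo_lock_delayed_workqueue(bdev);
        /* ... the device is quiescent here ... */
        ttm_bo_unlock_delayed_workqueue(bdev, resched);
}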
  805 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
  806                         bool no_wait_gpu)
  807 {
  808         struct ttm_bo_device *bdev = bo->bdev;
  809         struct ttm_mem_reg evict_mem;
  810         struct ttm_placement placement;
  811         int ret = 0;
  812 
  813         mtx_lock(&bdev->fence_lock);
  814         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
  815         mtx_unlock(&bdev->fence_lock);
  816 
  817         if (unlikely(ret != 0)) {
  818                 if (ret != -ERESTARTSYS) {
  819                         printf("[TTM] Failed to expire sync object before buffer eviction\n");
  820                 }
  821                 goto out;
  822         }
  823 
  824         MPASS(ttm_bo_is_reserved(bo));
  825 
  826         evict_mem = bo->mem;
  827         evict_mem.mm_node = NULL;
  828         evict_mem.bus.io_reserved_vm = false;
  829         evict_mem.bus.io_reserved_count = 0;
  830 
  831         placement.fpfn = 0;
  832         placement.lpfn = 0;
  833         placement.num_placement = 0;
  834         placement.num_busy_placement = 0;
  835         bdev->driver->evict_flags(bo, &placement);
  836         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
  837                                 no_wait_gpu);
  838         if (ret) {
  839                 if (ret != -ERESTARTSYS) {
  840                         printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
  841                                bo);
  842                         ttm_bo_mem_space_debug(bo, &placement);
  843                 }
  844                 goto out;
  845         }
  846 
  847         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
  848                                      no_wait_gpu);
  849         if (ret) {
  850                 if (ret != -ERESTARTSYS)
  851                         printf("[TTM] Buffer eviction failed\n");
  852                 ttm_bo_mem_put(bo, &evict_mem);
  853                 goto out;
  854         }
  855         bo->evicted = true;
  856 out:
  857         return ret;
  858 }
  859 
  860 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  861                                 uint32_t mem_type,
  862                                 bool interruptible,
  863                                 bool no_wait_gpu)
  864 {
  865         struct ttm_bo_global *glob = bdev->glob;
  866         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  867         struct ttm_buffer_object *bo;
  868         int ret = -EBUSY, put_count;
  869 
  870         mtx_lock(&glob->lru_lock);
  871         list_for_each_entry(bo, &man->lru, lru) {
  872                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
  873                 if (!ret)
  874                         break;
  875         }
  876 
  877         if (ret) {
  878                 mtx_unlock(&glob->lru_lock);
  879                 return ret;
  880         }
  881 
  882         refcount_acquire(&bo->list_kref);
  883 
  884         if (!list_empty(&bo->ddestroy)) {
  885                 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
  886                                                      no_wait_gpu);
  887                 if (refcount_release(&bo->list_kref))
  888                         ttm_bo_release_list(bo);
  889                 return ret;
  890         }
  891 
  892         put_count = ttm_bo_del_from_lru(bo);
  893         mtx_unlock(&glob->lru_lock);
  894 
  895         MPASS(ret == 0);
  896 
  897         ttm_bo_list_ref_sub(bo, put_count, true);
  898 
  899         ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
  900         ttm_bo_unreserve(bo);
  901 
  902         if (refcount_release(&bo->list_kref))
  903                 ttm_bo_release_list(bo);
  904         return ret;
  905 }
  906 
  907 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
  908 {
  909         struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
  910 
  911         if (mem->mm_node)
  912                 (*man->func->put_node)(man, mem);
  913 }
  914 
  915 /**
  916  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  917  * space, or we've evicted everything and there isn't enough space.
  918  */
  919 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
  920                                         uint32_t mem_type,
  921                                         struct ttm_placement *placement,
  922                                         struct ttm_mem_reg *mem,
  923                                         bool interruptible,
  924                                         bool no_wait_gpu)
  925 {
  926         struct ttm_bo_device *bdev = bo->bdev;
  927         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  928         int ret;
  929 
  930         do {
  931                 ret = (*man->func->get_node)(man, bo, placement, mem);
  932                 if (unlikely(ret != 0))
  933                         return ret;
  934                 if (mem->mm_node)
  935                         break;
  936                 ret = ttm_mem_evict_first(bdev, mem_type,
  937                                           interruptible, no_wait_gpu);
  938                 if (unlikely(ret != 0))
  939                         return ret;
  940         } while (1);
  941         if (mem->mm_node == NULL)
  942                 return -ENOMEM;
  943         mem->mem_type = mem_type;
  944         return 0;
  945 }
  946 
  947 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
  948                                       uint32_t cur_placement,
  949                                       uint32_t proposed_placement)
  950 {
  951         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
  952         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
  953 
  954         /**
  955          * Keep current caching if possible.
  956          */
  957 
  958         if ((cur_placement & caching) != 0)
  959                 result |= (cur_placement & caching);
  960         else if ((man->default_caching & caching) != 0)
  961                 result |= man->default_caching;
  962         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
  963                 result |= TTM_PL_FLAG_CACHED;
  964         else if ((TTM_PL_FLAG_WC & caching) != 0)
  965                 result |= TTM_PL_FLAG_WC;
  966         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
  967                 result |= TTM_PL_FLAG_UNCACHED;
  968 
  969         return result;
  970 }
  971 
  972 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  973                                  uint32_t mem_type,
  974                                  uint32_t proposed_placement,
  975                                  uint32_t *masked_placement)
  976 {
  977         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  978 
  979         if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
  980                 return false;
  981 
  982         if ((proposed_placement & man->available_caching) == 0)
  983                 return false;
  984 
  985         cur_flags |= (proposed_placement & man->available_caching);
  986 
  987         *masked_placement = cur_flags;
  988         return true;
  989 }
  990 
  991 /**
  992  * Creates space for memory region @mem according to its type.
  993  *
  994  * This function first searches for free space in compatible memory types in
  995  * the priority order defined by the driver.  If free space isn't found, then
  996  * ttm_bo_mem_force_space is attempted in priority order to evict and find
  997  * space.
  998  */
  999 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 1000                         struct ttm_placement *placement,
 1001                         struct ttm_mem_reg *mem,
 1002                         bool interruptible,
 1003                         bool no_wait_gpu)
 1004 {
 1005         struct ttm_bo_device *bdev = bo->bdev;
 1006         struct ttm_mem_type_manager *man;
 1007         uint32_t mem_type = TTM_PL_SYSTEM;
 1008         uint32_t cur_flags = 0;
 1009         bool type_found = false;
 1010         bool type_ok = false;
 1011         bool has_erestartsys = false;
 1012         int i, ret;
 1013 
 1014         mem->mm_node = NULL;
 1015         for (i = 0; i < placement->num_placement; ++i) {
 1016                 ret = ttm_mem_type_from_flags(placement->placement[i],
 1017                                                 &mem_type);
 1018                 if (ret)
 1019                         return ret;
 1020                 man = &bdev->man[mem_type];
 1021 
 1022                 type_ok = ttm_bo_mt_compatible(man,
 1023                                                 mem_type,
 1024                                                 placement->placement[i],
 1025                                                 &cur_flags);
 1026 
 1027                 if (!type_ok)
 1028                         continue;
 1029 
 1030                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 1031                                                   cur_flags);
 1032                 /*
 1033                  * Use the access and other non-mapping-related flag bits from
 1034                  * the memory placement flags to the current flags
 1035                  */
 1036                 ttm_flag_masked(&cur_flags, placement->placement[i],
 1037                                 ~TTM_PL_MASK_MEMTYPE);
 1038 
 1039                 if (mem_type == TTM_PL_SYSTEM)
 1040                         break;
 1041 
 1042                 if (man->has_type && man->use_type) {
 1043                         type_found = true;
 1044                         ret = (*man->func->get_node)(man, bo, placement, mem);
 1045                         if (unlikely(ret))
 1046                                 return ret;
 1047                 }
 1048                 if (mem->mm_node)
 1049                         break;
 1050         }
 1051 
 1052         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 1053                 mem->mem_type = mem_type;
 1054                 mem->placement = cur_flags;
 1055                 return 0;
 1056         }
 1057 
 1058         if (!type_found)
 1059                 return -EINVAL;
 1060 
 1061         for (i = 0; i < placement->num_busy_placement; ++i) {
 1062                 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
 1063                                                 &mem_type);
 1064                 if (ret)
 1065                         return ret;
 1066                 man = &bdev->man[mem_type];
 1067                 if (!man->has_type)
 1068                         continue;
 1069                 if (!ttm_bo_mt_compatible(man,
 1070                                                 mem_type,
 1071                                                 placement->busy_placement[i],
 1072                                                 &cur_flags))
 1073                         continue;
 1074 
 1075                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 1076                                                   cur_flags);
 1077                 /*
 1078                  * Use the access and other non-mapping-related flag bits from
 1079                  * the memory placement flags to the current flags
 1080                  */
 1081                 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 1082                                 ~TTM_PL_MASK_MEMTYPE);
 1083 
 1084 
 1085                 if (mem_type == TTM_PL_SYSTEM) {
 1086                         mem->mem_type = mem_type;
 1087                         mem->placement = cur_flags;
 1088                         mem->mm_node = NULL;
 1089                         return 0;
 1090                 }
 1091 
 1092                 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
 1093                                                 interruptible, no_wait_gpu);
 1094                 if (ret == 0 && mem->mm_node) {
 1095                         mem->placement = cur_flags;
 1096                         return 0;
 1097                 }
 1098                 if (ret == -ERESTARTSYS)
 1099                         has_erestartsys = true;
 1100         }
 1101         ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 1102         return ret;
 1103 }
 1104 
 1105 static
 1106 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 1107                         struct ttm_placement *placement,
 1108                         bool interruptible,
 1109                         bool no_wait_gpu)
 1110 {
 1111         int ret = 0;
 1112         struct ttm_mem_reg mem;
 1113         struct ttm_bo_device *bdev = bo->bdev;
 1114 
 1115         MPASS(ttm_bo_is_reserved(bo));
 1116 
 1117         /*
 1118          * FIXME: It's possible to pipeline buffer moves.
 1119          * Have the driver move function wait for idle when necessary,
 1120          * instead of doing it here.
 1121          */
 1122         mtx_lock(&bdev->fence_lock);
 1123         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 1124         mtx_unlock(&bdev->fence_lock);
 1125         if (ret)
 1126                 return ret;
 1127         mem.num_pages = bo->num_pages;
 1128         mem.size = mem.num_pages << PAGE_SHIFT;
 1129         mem.page_alignment = bo->mem.page_alignment;
 1130         mem.bus.io_reserved_vm = false;
 1131         mem.bus.io_reserved_count = 0;
 1132         /*
 1133          * Determine where to move the buffer.
 1134          */
 1135         ret = ttm_bo_mem_space(bo, placement, &mem,
 1136                                interruptible, no_wait_gpu);
 1137         if (ret)
 1138                 goto out_unlock;
 1139         ret = ttm_bo_handle_move_mem(bo, &mem, false,
 1140                                      interruptible, no_wait_gpu);
 1141 out_unlock:
 1142         if (ret && mem.mm_node)
 1143                 ttm_bo_mem_put(bo, &mem);
 1144         return ret;
 1145 }
 1146 
 1147 static int ttm_bo_mem_compat(struct ttm_placement *placement,
 1148                              struct ttm_mem_reg *mem)
 1149 {
 1150         int i;
 1151 
 1152         if (mem->mm_node && placement->lpfn != 0 &&
 1153             (mem->start < placement->fpfn ||
 1154              mem->start + mem->num_pages > placement->lpfn))
 1155                 return -1;
 1156 
 1157         for (i = 0; i < placement->num_placement; i++) {
 1158                 if ((placement->placement[i] & mem->placement &
 1159                         TTM_PL_MASK_CACHING) &&
 1160                         (placement->placement[i] & mem->placement &
 1161                         TTM_PL_MASK_MEM))
 1162                         return i;
 1163         }
 1164         return -1;
 1165 }
 1166 
 1167 int ttm_bo_validate(struct ttm_buffer_object *bo,
 1168                         struct ttm_placement *placement,
 1169                         bool interruptible,
 1170                         bool no_wait_gpu)
 1171 {
 1172         int ret;
 1173 
 1174         MPASS(ttm_bo_is_reserved(bo));
 1175         /* Check that range is valid */
 1176         if (placement->lpfn || placement->fpfn)
 1177                 if (placement->fpfn > placement->lpfn ||
 1178                         (placement->lpfn - placement->fpfn) < bo->num_pages)
 1179                         return -EINVAL;
 1180         /*
 1181          * Check whether we need to move buffer.
 1182          */
 1183         ret = ttm_bo_mem_compat(placement, &bo->mem);
 1184         if (ret < 0) {
 1185                 ret = ttm_bo_move_buffer(bo, placement, interruptible,
 1186                                          no_wait_gpu);
 1187                 if (ret)
 1188                         return ret;
 1189         } else {
 1190                 /*
 1191                  * Use the access and other non-mapping-related flag bits from
 1192                  * the compatible memory placement flags to the active flags
 1193                  */
 1194                 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
 1195                                 ~TTM_PL_MASK_MEMTYPE);
 1196         }
 1197         /*
 1198          * We might need to add a TTM.
 1199          */
 1200         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 1201                 ret = ttm_bo_add_ttm(bo, true);
 1202                 if (ret)
 1203                         return ret;
 1204         }
 1205         return 0;
 1206 }
 1207 
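/*
 * Illustrative sketch (hypothetical helper and placement policy): how a
 * driver might describe an acceptable set of placements and let
 * ttm_bo_validate() / ttm_bo_mem_space() find space, or force it by
 * eviction when the preferred pool is full.  The VRAM-then-system
 * fallback is only an example.
 */
static __unused int
ttm_example_move_to_vram(struct ttm_buffer_object *bo)
{
        static const uint32_t flags[] = {
                TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
                TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        };
        struct ttm_placement placement;
        int ret;

        placement.fpfn = 0;                     /* no page-range restriction */
        placement.lpfn = 0;
        placement.placement = flags;            /* preferred placements first */
        placement.num_placement = 2;
        placement.busy_placement = flags;       /* fallbacks under memory pressure */
        placement.num_busy_placement = 2;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (ret != 0)
                return (ret);
        ret = ttm_bo_validate(bo, &placement, true, false);
        ttm_bo_unreserve(bo);
        return (ret);
}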
 1208 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 1209                                 struct ttm_placement *placement)
 1210 {
 1211         MPASS(!((placement->fpfn || placement->lpfn) &&
 1212             (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));
 1213 
 1214         return 0;
 1215 }
 1216 
 1217 int ttm_bo_init(struct ttm_bo_device *bdev,
 1218                 struct ttm_buffer_object *bo,
 1219                 unsigned long size,
 1220                 enum ttm_bo_type type,
 1221                 struct ttm_placement *placement,
 1222                 uint32_t page_alignment,
 1223                 bool interruptible,
 1224                 struct vm_object *persistent_swap_storage,
 1225                 size_t acc_size,
 1226                 struct sg_table *sg,
 1227                 void (*destroy) (struct ttm_buffer_object *))
 1228 {
 1229         int ret = 0;
 1230         unsigned long num_pages;
 1231         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 1232 
 1233         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 1234         if (ret) {
 1235                 printf("[TTM] Out of kernel memory\n");
 1236                 if (destroy)
 1237                         (*destroy)(bo);
 1238                 else
 1239                         free(bo, M_TTM_BO);
 1240                 return -ENOMEM;
 1241         }
 1242 
 1243         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 1244         if (num_pages == 0) {
 1245                 printf("[TTM] Illegal buffer object size\n");
 1246                 if (destroy)
 1247                         (*destroy)(bo);
 1248                 else
 1249                         free(bo, M_TTM_BO);
 1250                 ttm_mem_global_free(mem_glob, acc_size);
 1251                 return -EINVAL;
 1252         }
 1253         bo->destroy = destroy;
 1254 
 1255         refcount_init(&bo->kref, 1);
 1256         refcount_init(&bo->list_kref, 1);
 1257         atomic_set(&bo->cpu_writers, 0);
 1258         atomic_set(&bo->reserved, 1);
 1259         INIT_LIST_HEAD(&bo->lru);
 1260         INIT_LIST_HEAD(&bo->ddestroy);
 1261         INIT_LIST_HEAD(&bo->swap);
 1262         INIT_LIST_HEAD(&bo->io_reserve_lru);
 1263         bo->bdev = bdev;
 1264         bo->glob = bdev->glob;
 1265         bo->type = type;
 1266         bo->num_pages = num_pages;
 1267         bo->mem.size = num_pages << PAGE_SHIFT;
 1268         bo->mem.mem_type = TTM_PL_SYSTEM;
 1269         bo->mem.num_pages = bo->num_pages;
 1270         bo->mem.mm_node = NULL;
 1271         bo->mem.page_alignment = page_alignment;
 1272         bo->mem.bus.io_reserved_vm = false;
 1273         bo->mem.bus.io_reserved_count = 0;
 1274         bo->priv_flags = 0;
 1275         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 1276         bo->seq_valid = false;
 1277         bo->persistent_swap_storage = persistent_swap_storage;
 1278         bo->acc_size = acc_size;
 1279         bo->sg = sg;
 1280         atomic_inc(&bo->glob->bo_count);
 1281 
 1282         ret = ttm_bo_check_placement(bo, placement);
 1283         if (unlikely(ret != 0))
 1284                 goto out_err;
 1285 
 1286         /*
 1287          * For ttm_bo_type_device buffers, allocate
 1288          * address space from the device.
 1289          */
 1290         if (bo->type == ttm_bo_type_device ||
 1291             bo->type == ttm_bo_type_sg) {
 1292                 ret = ttm_bo_setup_vm(bo);
 1293                 if (ret)
 1294                         goto out_err;
 1295         }
 1296 
 1297         ret = ttm_bo_validate(bo, placement, interruptible, false);
 1298         if (ret)
 1299                 goto out_err;
 1300 
 1301         ttm_bo_unreserve(bo);
 1302         return 0;
 1303 
 1304 out_err:
 1305         ttm_bo_unreserve(bo);
 1306         ttm_bo_unref(&bo);
 1307 
 1308         return ret;
 1309 }
 1310 
 1311 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
 1312                        unsigned long bo_size,
 1313                        unsigned struct_size)
 1314 {
 1315         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
 1316         size_t size = 0;
 1317 
 1318         size += ttm_round_pot(struct_size);
 1319         size += PAGE_ALIGN(npages * sizeof(void *));
 1320         size += ttm_round_pot(sizeof(struct ttm_tt));
 1321         return size;
 1322 }
 1323 
 1324 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 1325                            unsigned long bo_size,
 1326                            unsigned struct_size)
 1327 {
 1328         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
 1329         size_t size = 0;
 1330 
 1331         size += ttm_round_pot(struct_size);
 1332         size += PAGE_ALIGN(npages * sizeof(void *));
 1333         size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
 1334         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
 1335         return size;
 1336 }
 1337 
 1338 int ttm_bo_create(struct ttm_bo_device *bdev,
 1339                         unsigned long size,
 1340                         enum ttm_bo_type type,
 1341                         struct ttm_placement *placement,
 1342                         uint32_t page_alignment,
 1343                         bool interruptible,
 1344                         struct vm_object *persistent_swap_storage,
 1345                         struct ttm_buffer_object **p_bo)
 1346 {
 1347         struct ttm_buffer_object *bo;
 1348         size_t acc_size;
 1349         int ret;
 1350 
 1351         bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
 1352         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
 1353         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
 1354                           interruptible, persistent_swap_storage, acc_size,
 1355                           NULL, NULL);
 1356         if (likely(ret == 0))
 1357                 *p_bo = bo;
 1358 
 1359         return ret;
 1360 }
 1361 
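/*
 * Illustrative sketch (hypothetical helper): typical lifetime of a small
 * kernel-internal buffer created through the ttm_bo_create() convenience
 * wrapper above.  A real driver would choose its own size, alignment and
 * placement.
 */
static __unused int
ttm_example_create_and_free(struct ttm_bo_device *bdev,
    struct ttm_placement *placement)
{
        struct ttm_buffer_object *bo = NULL;
        int ret;

        ret = ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel, placement,
            0 /* page_alignment */, false, NULL, &bo);
        if (ret != 0)
                return (ret);

        /* ... map the buffer, fill it, hand it to the GPU ... */

        ttm_bo_unref(&bo);      /* drops the initial reference from ttm_bo_init() */
        return (0);
}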
 1362 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 1363                                         unsigned mem_type, bool allow_errors)
 1364 {
 1365         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 1366         struct ttm_bo_global *glob = bdev->glob;
 1367         int ret;
 1368 
 1369         /*
 1370          * Can't use standard list traversal since we're unlocking.
 1371          */
 1372 
 1373         mtx_lock(&glob->lru_lock);
 1374         while (!list_empty(&man->lru)) {
 1375                 mtx_unlock(&glob->lru_lock);
 1376                 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
 1377                 if (ret) {
 1378                         if (allow_errors) {
 1379                                 return ret;
 1380                         } else {
 1381                                 printf("[TTM] Cleanup eviction failed\n");
 1382                         }
 1383                 }
 1384                 mtx_lock(&glob->lru_lock);
 1385         }
 1386         mtx_unlock(&glob->lru_lock);
 1387         return 0;
 1388 }
 1389 
 1390 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 1391 {
 1392         struct ttm_mem_type_manager *man;
 1393         int ret = -EINVAL;
 1394 
 1395         if (mem_type >= TTM_NUM_MEM_TYPES) {
 1396                 printf("[TTM] Illegal memory type %d\n", mem_type);
 1397                 return ret;
 1398         }
 1399         man = &bdev->man[mem_type];
 1400 
 1401         if (!man->has_type) {
 1402                 printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
 1403                        mem_type);
 1404                 return ret;
 1405         }
 1406 
 1407         man->use_type = false;
 1408         man->has_type = false;
 1409 
 1410         ret = 0;
 1411         if (mem_type > 0) {
 1412                 ttm_bo_force_list_clean(bdev, mem_type, false);
 1413 
 1414                 ret = (*man->func->takedown)(man);
 1415         }
 1416 
 1417         return ret;
 1418 }
 1419 
 1420 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 1421 {
 1422         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 1423 
 1424         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
 1425                 printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
 1426                 return -EINVAL;
 1427         }
 1428 
 1429         if (!man->has_type) {
 1430                 printf("[TTM] Memory type %u has not been initialized\n", mem_type);
 1431                 return 0;
 1432         }
 1433 
 1434         return ttm_bo_force_list_clean(bdev, mem_type, true);
 1435 }
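/*
 * Illustrative sketch (not part of the original source): drivers typically
 * call ttm_bo_evict_mm() on a device-local domain (e.g. TTM_PL_VRAM)
 * before suspend so that buffer contents are migrated back to system
 * memory.  The "example_" name is hypothetical.
 */
static int example_evict_vram_for_suspend(struct ttm_bo_device *bdev)
{
        int ret;

        ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
        if (ret != 0)
                printf("example: VRAM eviction failed (%d), "
                    "suspending anyway\n", ret);
        return (ret);
}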
 1436 
 1437 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 1438                         unsigned long p_size)
 1439 {
 1440         int ret = -EINVAL;
 1441         struct ttm_mem_type_manager *man;
 1442 
 1443         MPASS(type < TTM_NUM_MEM_TYPES);
 1444         man = &bdev->man[type];
 1445         MPASS(!man->has_type);
 1446         man->io_reserve_fastpath = true;
 1447         man->use_io_reserve_lru = false;
 1448         sx_init(&man->io_reserve_mutex, "ttmman");
 1449         INIT_LIST_HEAD(&man->io_reserve_lru);
 1450 
 1451         ret = bdev->driver->init_mem_type(bdev, type, man);
 1452         if (ret)
 1453                 return ret;
 1454         man->bdev = bdev;
 1455 
 1456         ret = 0;
 1457         if (type != TTM_PL_SYSTEM) {
 1458                 ret = (*man->func->init)(man, p_size);
 1459                 if (ret)
 1460                         return ret;
 1461         }
 1462         man->has_type = true;
 1463         man->use_type = true;
 1464         man->size = p_size;
 1465 
 1466         INIT_LIST_HEAD(&man->lru);
 1467 
 1468         return 0;
 1469 }
 1470 
 1471 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
 1472 {
 1473 
 1474         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
 1475         vm_page_free(glob->dummy_read_page);
 1476 }
 1477 
 1478 void ttm_bo_global_release(struct drm_global_reference *ref)
 1479 {
 1480         struct ttm_bo_global *glob = ref->object;
 1481 
 1482         if (refcount_release(&glob->kobj_ref))
 1483                 ttm_bo_global_kobj_release(glob);
 1484 }
 1485 
 1486 int ttm_bo_global_init(struct drm_global_reference *ref)
 1487 {
 1488         struct ttm_bo_global_ref *bo_ref =
 1489                 container_of(ref, struct ttm_bo_global_ref, ref);
 1490         struct ttm_bo_global *glob = ref->object;
 1491         int ret;
 1492         int tries;
 1493 
 1494         sx_init(&glob->device_list_mutex, "ttmdlm");
 1495         mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
 1496         glob->mem_glob = bo_ref->mem_glob;
 1497         tries = 0;
 1498 retry:
 1499         glob->dummy_read_page = vm_page_alloc_noobj_contig(0, 1, 0,
 1500             VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
 1501 
 1502         if (unlikely(glob->dummy_read_page == NULL)) {
 1503                 if (tries < 1 && vm_page_reclaim_contig(0, 1, 0,
 1504                     VM_MAX_ADDRESS, PAGE_SIZE, 0)) {
 1505                         tries++;
 1506                         goto retry;
 1507                 }
 1508                 ret = -ENOMEM;
 1509                 goto out_no_drp;
 1510         }
 1511 
 1512         INIT_LIST_HEAD(&glob->swap_lru);
 1513         INIT_LIST_HEAD(&glob->device_list);
 1514 
 1515         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
 1516         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
 1517         if (unlikely(ret != 0)) {
 1518                 printf("[TTM] Could not register buffer object swapout\n");
 1519                 goto out_no_shrink;
 1520         }
 1521 
 1522         atomic_set(&glob->bo_count, 0);
 1523 
 1524         refcount_init(&glob->kobj_ref, 1);
 1525         return (0);
 1526 
 1527 out_no_shrink:
 1528         vm_page_free(glob->dummy_read_page);
 1529 out_no_drp:
 1530         free(glob, M_DRM_GLOBAL);
 1531         return ret;
 1532 }
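/*
 * Illustrative note (not part of the original source): ttm_bo_global_init()
 * and ttm_bo_global_release() are not called directly.  A driver embeds a
 * struct drm_global_reference in a ttm_bo_global_ref (which also supplies
 * mem_glob) and lets drm_global_item_ref() share and refcount the
 * ttm_bo_global across devices, roughly:
 *
 *      ref->global_type = DRM_GLOBAL_TTM_BO;
 *      ref->size = sizeof(struct ttm_bo_global);
 *      ref->init = &ttm_bo_global_init;
 *      ref->release = &ttm_bo_global_release;
 *      ret = drm_global_item_ref(ref);
 */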
 1533 
 1534 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 1535 {
 1536         int ret = 0;
 1537         unsigned i = TTM_NUM_MEM_TYPES;
 1538         struct ttm_mem_type_manager *man;
 1539         struct ttm_bo_global *glob = bdev->glob;
 1540 
 1541         while (i--) {
 1542                 man = &bdev->man[i];
 1543                 if (man->has_type) {
 1544                         man->use_type = false;
 1545                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
 1546                                 ret = -EBUSY;
 1547                                 printf("[TTM] DRM memory manager type %d is not clean\n",
 1548                                        i);
 1549                         }
 1550                         man->has_type = false;
 1551                 }
 1552         }
 1553 
 1554         sx_xlock(&glob->device_list_mutex);
 1555         list_del(&bdev->device_list);
 1556         sx_xunlock(&glob->device_list_mutex);
 1557 
 1558         if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
 1559                 taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
 1560 
 1561         while (ttm_bo_delayed_delete(bdev, true))
 1562                 ;
 1563 
 1564         mtx_lock(&glob->lru_lock);
 1565         if (list_empty(&bdev->ddestroy))
 1566                 TTM_DEBUG("Delayed destroy list was clean\n");
 1567 
 1568         if (list_empty(&bdev->man[0].lru))
 1569                 TTM_DEBUG("Swap list was clean\n");
 1570         mtx_unlock(&glob->lru_lock);
 1571 
 1572         MPASS(drm_mm_clean(&bdev->addr_space_mm));
 1573         rw_wlock(&bdev->vm_lock);
 1574         drm_mm_takedown(&bdev->addr_space_mm);
 1575         rw_wunlock(&bdev->vm_lock);
 1576 
 1577         return ret;
 1578 }
 1579 
 1580 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 1581                        struct ttm_bo_global *glob,
 1582                        struct ttm_bo_driver *driver,
 1583                        uint64_t file_page_offset,
 1584                        bool need_dma32)
 1585 {
 1586         int ret = -EINVAL;
 1587 
 1588         rw_init(&bdev->vm_lock, "ttmvml");
 1589         bdev->driver = driver;
 1590 
 1591         memset(bdev->man, 0, sizeof(bdev->man));
 1592 
 1593         /*
 1594          * Initialize the system memory buffer type.
 1595          * Other types need to be driver / IOCTL initialized.
 1596          */
 1597         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 1598         if (unlikely(ret != 0))
 1599                 goto out_no_sys;
 1600 
 1601         RB_INIT(&bdev->addr_space_rb);
 1602         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 1603         if (unlikely(ret != 0))
 1604                 goto out_no_addr_mm;
 1605 
 1606         TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
 1607             ttm_bo_delayed_workqueue, bdev);
 1608         INIT_LIST_HEAD(&bdev->ddestroy);
 1609         bdev->dev_mapping = NULL;
 1610         bdev->glob = glob;
 1611         bdev->need_dma32 = need_dma32;
 1612         bdev->val_seq = 0;
 1613         mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
 1614         sx_xlock(&glob->device_list_mutex);
 1615         list_add_tail(&bdev->device_list, &glob->device_list);
 1616         sx_xunlock(&glob->device_list_mutex);
 1617 
 1618         return 0;
 1619 out_no_addr_mm:
 1620         ttm_bo_clean_mm(bdev, 0);
 1621 out_no_sys:
 1622         return ret;
 1623 }
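/*
 * Illustrative sketch (not part of the original source): the usual
 * bring-up order built on the functions above.  Only the system manager is
 * created by ttm_bo_device_init(); additional domains are registered
 * afterwards with ttm_bo_init_mm(), sized in pages.  The "example_" names
 * and parameters are hypothetical.
 */
static int example_ttm_attach(struct ttm_bo_device *bdev,
    struct ttm_bo_global *glob, struct ttm_bo_driver *driver,
    uint64_t file_page_offset, unsigned long vram_pages)
{
        int ret;

        ret = ttm_bo_device_init(bdev, glob, driver, file_page_offset, false);
        if (ret != 0)
                return (ret);

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
        if (ret != 0)
                (void)ttm_bo_device_release(bdev);
        return (ret);
}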
 1624 
 1625 /*
 1626  * buffer object vm functions.
 1627  */
 1628 
 1629 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 1630 {
 1631         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 1632 
 1633         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 1634                 if (mem->mem_type == TTM_PL_SYSTEM)
 1635                         return false;
 1636 
 1637                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
 1638                         return false;
 1639 
 1640                 if (mem->placement & TTM_PL_FLAG_CACHED)
 1641                         return false;
 1642         }
 1643         return true;
 1644 }
 1645 
 1646 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 1647 {
 1648 
 1649         ttm_bo_release_mmap(bo);
 1650         ttm_mem_io_free_vm(bo);
 1651 }
 1652 
 1653 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 1654 {
 1655         struct ttm_bo_device *bdev = bo->bdev;
 1656         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 1657 
 1658         ttm_mem_io_lock(man, false);
 1659         ttm_bo_unmap_virtual_locked(bo);
 1660         ttm_mem_io_unlock(man);
 1661 }
 1662 
 1663 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 1664 {
 1665         struct ttm_bo_device *bdev = bo->bdev;
 1666 
 1667         /* The caller acquired bdev->vm_lock. */
 1668         RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
 1669 }
 1670 
 1671 /**
 1672  * ttm_bo_setup_vm:
 1673  *
 1674  * @bo: the buffer to allocate address space for
 1675  *
 1676  * Allocate address space in the drm device so that applications
 1677  * can mmap the buffer and access the contents. This only
 1678  * applies to ttm_bo_type_device objects as others are not
 1679  * placed in the drm device address space.
 1680  */
 1681 
 1682 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 1683 {
 1684         struct ttm_bo_device *bdev = bo->bdev;
 1685         int ret;
 1686 
 1687 retry_pre_get:
 1688         ret = drm_mm_pre_get(&bdev->addr_space_mm);
 1689         if (unlikely(ret != 0))
 1690                 return ret;
 1691 
 1692         rw_wlock(&bdev->vm_lock);
 1693         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
 1694                                          bo->mem.num_pages, 0, 0);
 1695 
 1696         if (unlikely(bo->vm_node == NULL)) {
 1697                 ret = -ENOMEM;
 1698                 goto out_unlock;
 1699         }
 1700 
 1701         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
 1702                                               bo->mem.num_pages, 0);
 1703 
 1704         if (unlikely(bo->vm_node == NULL)) {
 1705                 rw_wunlock(&bdev->vm_lock);
 1706                 goto retry_pre_get;
 1707         }
 1708 
 1709         ttm_bo_vm_insert_rb(bo);
 1710         rw_wunlock(&bdev->vm_lock);
 1711         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
 1712 
 1713         return 0;
 1714 out_unlock:
 1715         rw_wunlock(&bdev->vm_lock);
 1716         return ret;
 1717 }
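/*
 * Illustrative note (not part of the original source): the offset computed
 * above (bo->addr_space_offset) is what a driver returns to userspace
 * (typically through a driver-specific ioctl) so the application can map
 * the object through the drm device node, roughly:
 *
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          drm_fd, (off_t)addr_space_offset);
 *
 * Faults on that mapping are resolved back to this buffer object via the
 * addr_space_rb tree populated by ttm_bo_vm_insert_rb() above.
 */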
 1718 
 1719 int ttm_bo_wait(struct ttm_buffer_object *bo,
 1720                 bool lazy, bool interruptible, bool no_wait)
 1721 {
 1722         struct ttm_bo_driver *driver = bo->bdev->driver;
 1723         struct ttm_bo_device *bdev = bo->bdev;
 1724         void *sync_obj;
 1725         int ret = 0;
 1726 
 1727         if (likely(bo->sync_obj == NULL))
 1728                 return 0;
 1729 
 1730         while (bo->sync_obj) {
 1731 
 1732                 if (driver->sync_obj_signaled(bo->sync_obj)) {
 1733                         void *tmp_obj = bo->sync_obj;
 1734                         bo->sync_obj = NULL;
 1735                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 1736                         mtx_unlock(&bdev->fence_lock);
 1737                         driver->sync_obj_unref(&tmp_obj);
 1738                         mtx_lock(&bdev->fence_lock);
 1739                         continue;
 1740                 }
 1741 
 1742                 if (no_wait)
 1743                         return -EBUSY;
 1744 
 1745                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
 1746                 mtx_unlock(&bdev->fence_lock);
 1747                 ret = driver->sync_obj_wait(sync_obj,
 1748                                             lazy, interruptible);
 1749                 if (unlikely(ret != 0)) {
 1750                         driver->sync_obj_unref(&sync_obj);
 1751                         mtx_lock(&bdev->fence_lock);
 1752                         return ret;
 1753                 }
 1754                 mtx_lock(&bdev->fence_lock);
 1755                 if (likely(bo->sync_obj == sync_obj)) {
 1756                         void *tmp_obj = bo->sync_obj;
 1757                         bo->sync_obj = NULL;
 1758                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 1759                                   &bo->priv_flags);
 1760                         mtx_unlock(&bdev->fence_lock);
 1761                         driver->sync_obj_unref(&sync_obj);
 1762                         driver->sync_obj_unref(&tmp_obj);
 1763                         mtx_lock(&bdev->fence_lock);
 1764                 } else {
 1765                         mtx_unlock(&bdev->fence_lock);
 1766                         driver->sync_obj_unref(&sync_obj);
 1767                         mtx_lock(&bdev->fence_lock);
 1768                 }
 1769         }
 1770         return 0;
 1771 }
 1772 
 1773 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 1774 {
 1775         struct ttm_bo_device *bdev = bo->bdev;
 1776         int ret = 0;
 1777 
 1778         /*
 1779          * Using ttm_bo_reserve makes sure the lru lists are updated.
 1780          */
 1781 
 1782         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 1783         if (unlikely(ret != 0))
 1784                 return ret;
 1785         mtx_lock(&bdev->fence_lock);
 1786         ret = ttm_bo_wait(bo, false, true, no_wait);
 1787         mtx_unlock(&bdev->fence_lock);
 1788         if (likely(ret == 0))
 1789                 atomic_inc(&bo->cpu_writers);
 1790         ttm_bo_unreserve(bo);
 1791         return ret;
 1792 }
 1793 
 1794 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 1795 {
 1796         atomic_dec(&bo->cpu_writers);
 1797 }
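/*
 * Illustrative sketch (not part of the original source): the grab/release
 * pair above brackets direct CPU writes to a buffer, for example around a
 * kmap/copy/kunmap sequence.  "example_cpu_upload" and its arguments are
 * hypothetical.
 */
static int example_cpu_upload(struct ttm_buffer_object *bo,
    const void *src, size_t len)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;

        ret = ttm_bo_synccpu_write_grab(bo, false);
        if (ret != 0)
                return (ret);

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (ret == 0) {
                memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), src, len);
                ttm_bo_kunmap(&map);
        }
        ttm_bo_synccpu_write_release(bo);
        return (ret);
}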
 1798 
 1799 /**
 1800  * A buffer object shrink method that tries to swap out the first
 1801  * buffer object on the bo_global::swap_lru list.
 1802  */
 1803 
 1804 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 1805 {
 1806         struct ttm_bo_global *glob =
 1807             container_of(shrink, struct ttm_bo_global, shrink);
 1808         struct ttm_buffer_object *bo;
 1809         int ret = -EBUSY;
 1810         int put_count;
 1811         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 1812 
 1813         mtx_lock(&glob->lru_lock);
 1814         list_for_each_entry(bo, &glob->swap_lru, swap) {
 1815                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 1816                 if (!ret)
 1817                         break;
 1818         }
 1819 
 1820         if (ret) {
 1821                 mtx_unlock(&glob->lru_lock);
 1822                 return ret;
 1823         }
 1824 
 1825         refcount_acquire(&bo->list_kref);
 1826 
 1827         if (!list_empty(&bo->ddestroy)) {
 1828                 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
 1829                 if (refcount_release(&bo->list_kref))
 1830                         ttm_bo_release_list(bo);
 1831                 return ret;
 1832         }
 1833 
 1834         put_count = ttm_bo_del_from_lru(bo);
 1835         mtx_unlock(&glob->lru_lock);
 1836 
 1837         ttm_bo_list_ref_sub(bo, put_count, true);
 1838 
 1839         /**
 1840          * Wait for GPU, then move to system cached.
 1841          */
 1842 
 1843         mtx_lock(&bo->bdev->fence_lock);
 1844         ret = ttm_bo_wait(bo, false, false, false);
 1845         mtx_unlock(&bo->bdev->fence_lock);
 1846 
 1847         if (unlikely(ret != 0))
 1848                 goto out;
 1849 
 1850         if ((bo->mem.placement & swap_placement) != swap_placement) {
 1851                 struct ttm_mem_reg evict_mem;
 1852 
 1853                 evict_mem = bo->mem;
 1854                 evict_mem.mm_node = NULL;
 1855                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 1856                 evict_mem.mem_type = TTM_PL_SYSTEM;
 1857 
 1858                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
 1859                                              false, false);
 1860                 if (unlikely(ret != 0))
 1861                         goto out;
 1862         }
 1863 
 1864         ttm_bo_unmap_virtual(bo);
 1865 
 1866         /**
 1867          * Swap out. Buffer will be swapped in again as soon as
 1868          * anyone tries to access a ttm page.
 1869          */
 1870 
 1871         if (bo->bdev->driver->swap_notify)
 1872                 bo->bdev->driver->swap_notify(bo);
 1873 
 1874         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
 1875 out:
 1876 
 1877         /**
 1878          *
 1879          * Unreserve without putting on LRU to avoid swapping out an
 1880          * already swapped buffer.
 1881          */
 1882 
 1883         atomic_set(&bo->reserved, 0);
 1884         wakeup(bo);
 1885         if (refcount_release(&bo->list_kref))
 1886                 ttm_bo_release_list(bo);
 1887         return ret;
 1888 }
 1889 
 1890 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 1891 {
 1892         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 1893                 ;
 1894 }
