FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/ttm/ttm_bo_util.c

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <sys/sf_buf.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
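
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * driver's move() callback might use ttm_bo_move_ttm() for moves that
 * only rebind the ttm_tt, falling back to the CPU copy below for
 * everything else.  example_driver_bo_move() and the placement tests
 * are hypothetical; real drivers try an accelerated (GPU) path first.
 */
#if 0
static int
example_driver_bo_move(struct ttm_buffer_object *bo, bool evict,
    bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        /* SYSTEM <-> TT transitions rebind pages; no data copy needed. */
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT))
                return (ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem));

        /* Moves involving fixed (e.g. VRAM) memory need a real copy. */
        return (ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem));
}
#endif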

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible) {
                if (sx_xlock_sig(&man->io_reserve_mutex))
                        return (-EINTR);
                else
                        return (0);
        }

        sx_xlock(&man->io_reserve_mutex);
        return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        sx_xunlock(&man->io_reserve_mutex);
}
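
/*
 * Editor's illustrative sketch (not part of the original file): the
 * sequence callers use around the per-memory-type io_reserve mutex.
 * ttm_bo_kmap() below follows exactly this pattern;
 * example_reserve_io() is a hypothetical name.
 */
#if 0
static int
example_reserve_io(struct ttm_buffer_object *bo)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        int ret;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        return (ret);
}
#endif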

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
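
/*
 * Editor's note: ttm_mem_io_reserve() implements a small retry
 * protocol.  When the driver's io_mem_reserve() callback reports that
 * io space is exhausted (-EAGAIN), the least-recently-used buffer on
 * man->io_reserve_lru has its virtual mappings torn down by
 * ttm_mem_io_evict(), which frees up its io reservation, and the
 * reserve is retried.  The loop terminates once the LRU list empties,
 * because ttm_mem_io_evict() then returns -EAGAIN itself.
 */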

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
                    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                /* iowrite32(ioread32(srcP++), dstP++); */
                *dstP++ = *srcP++;
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

        /* XXXKIB can't sleep ? */
        dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
        if (!dst)
                return -ENOMEM;

        memcpy(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
        if (!src)
                return -ENOMEM;

        memcpy(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        /*
         * ttm may be NULL for moves between two iomem regions;
         * guard the dereference.
         */
        if (ttm != NULL && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret) {
                        /*
                         * If we fail here, don't nuke the mm node,
                         * as the bo still owns it.
                         */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }

        add = 0;
        dir = 1;

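        /*
         * Editor's note: when both placements are in the same memory
         * type, the source and destination ranges may overlap, so the
         * pages are copied in reverse order (memmove-style): add is
         * set to the last page index and dir to -1, ensuring no
         * source page is overwritten before it has been read.
         */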
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(old_mem->placement);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(new_mem->placement);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* Failing here means keep the old copy as-is. */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}

MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        free(bo, M_TTM_TRANSF_OBJ);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int
ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
    struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK);
        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        mtx_lock(&bdev->fence_lock);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        mtx_unlock(&bdev->fence_lock);
        refcount_init(&fbo->list_kref, 1);
        refcount_init(&fbo->kref, 1);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        *new_obj = fbo;
        return 0;
}
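
/*
 * Editor's note: ttm_bo_move_accel_cleanup() below is the in-file
 * consumer of this helper.  For a non-evicting accelerated move it
 * creates such a ghost object, which inherits the old placement (and,
 * for fixed memory types, the old ttm_tt) and releases them once the
 * GPU fence signals and the ghost's last reference is dropped.
 */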

vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) ||  \
 defined(__arm__)
        if (caching_flags & TTM_PL_FLAG_WC)
                return (VM_MEMATTR_WRITE_COMBINING);
        else
                /*
                 * We do not support i386; see the Linux source for
                 * the reasoning behind this comment.
                 */
                return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
                    bo->mem.bus.offset + offset, size,
                    (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                map->size = size;
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        vm_memattr_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int i, ret;

        MPASS(ttm != NULL);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->sf = sf_buf_alloc(map->page, 0);
                map->virtual = (void *)sf_buf_kva(map->sf);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        VM_MEMATTR_DEFAULT : ttm_io_prot(mem->placement);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->num_pages = num_pages;
                map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                /* XXXKIB hack */
                                pmap_page_set_memattr(ttm->pages[start_page +
                                    i], prot);
                        }
                        pmap_qenter((vm_offset_t)map->virtual,
                            &ttm->pages[start_page], num_pages);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        MPASS(list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                pmap_unmapdev((vm_offset_t)map->virtual, map->size);
                break;
        case ttm_bo_map_vmap:
                pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
                kva_free((vm_offset_t)map->virtual,
                    map->num_pages * PAGE_SIZE);
                break;
        case ttm_bo_map_kmap:
                sf_buf_free(map->sf);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                MPASS(0);
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
        map->sf = NULL;
}
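
/*
 * Editor's illustrative sketch (not part of the original file): a
 * typical ttm_bo_kmap()/ttm_bo_kunmap() pair giving the CPU access to
 * one page of a buffer object.  example_clear_page() is a
 * hypothetical helper; the bo must be reserved by the caller.
 */
#if 0
static int
example_clear_page(struct ttm_buffer_object *bo, unsigned long page)
{
        struct ttm_bo_kmap_obj map;
        int ret;

        ret = ttm_bo_kmap(bo, page, 1, &map);
        if (ret)
                return (ret);
        memset(map.virtual, 0, PAGE_SIZE);
        ttm_bo_kunmap(&map);
        return (0);
}
#endif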

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        mtx_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                mtx_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                mtx_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
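
/*
 * Editor's illustrative sketch (not part of the original file): the
 * intended call sequence in a driver performing an accelerated (GPU)
 * copy.  example_driver_move_blit() and example_schedule_copy() are
 * hypothetical; the driver schedules a blit from the old to the new
 * placement, then hands the resulting fence (sync object) to
 * ttm_bo_move_accel_cleanup() so the old memory is released only once
 * the GPU is done with it.
 */
#if 0
static int
example_driver_move_blit(struct ttm_buffer_object *bo, bool evict,
    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        void *fence;    /* driver-specific sync object */
        int ret;

        ret = example_schedule_copy(bo, &bo->mem, new_mem, &fence);
        if (ret)
                return (ret);
        return (ttm_bo_move_accel_cleanup(bo, fence, evict,
            no_wait_gpu, new_mem));
}
#endif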

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.