FreeBSD/Linux Kernel Cross Reference
sys/external/bsd/drm/dist/libdrm/nouveau/nouveau_bo.c


/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>	/* for memcpy() */
#include <errno.h>
#include <assert.h>

#include <sys/mman.h>
#include <sys/ioctl.h>

#include "nouveau_private.h"

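/*
 * Buffer-object (BO) management for libdrm's nouveau backend.  Each public
 * nouveau_bo is embedded in a nouveau_bo_priv, which may be backed by
 * anonymous system memory (sysmem), a kernel object (handle), or both.
 * Most entry points have two paths: a GEM-based one taken when the kernel
 * memory manager is available (nvdev->mm_enabled) and a legacy
 * DRM_NOUVEAU_MEM_* path taken when it is not.
 */
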
int
nouveau_bo_init(struct nouveau_device *dev)
{
        return 0;
}

void
nouveau_bo_takedown(struct nouveau_device *dev)
{
}

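/*
 * A buffer counts as allocated once it has system-memory backing, a
 * kernel object, or was created as a pinned "fake" buffer at a fixed
 * offset (see nouveau_bo_fake()).
 */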
static int
nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
{
        if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
                return 1;
        return 0;
}

static int
nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
{
        if (nvbo->user || nvbo->sysmem) {
                assert(nvbo->sysmem);
                return 0;
        }

        nvbo->sysmem = malloc(nvbo->size);
        if (!nvbo->sysmem)
                return -ENOMEM;

        return 0;
}

static void
nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
{
        if (nvbo->sysmem) {
                if (!nvbo->user)
                        free(nvbo->sysmem);
                nvbo->sysmem = NULL;
        }
}

static void
nouveau_bo_kfree_nomm(struct nouveau_bo_priv *nvbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        struct drm_nouveau_mem_free req;

        if (nvbo->map) {
                drmUnmap(nvbo->map, nvbo->size);
                nvbo->map = NULL;
        }

        req.offset = nvbo->offset;
        if (nvbo->domain & NOUVEAU_BO_GART)
                req.flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI;
        else if (nvbo->domain & NOUVEAU_BO_VRAM)
                req.flags = NOUVEAU_MEM_FB;
        drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE, &req, sizeof(req));

        nvbo->handle = 0;
}

static void
nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        struct drm_gem_close req;

        if (!nvbo->handle)
                return;

        if (!nvdev->mm_enabled) {
                nouveau_bo_kfree_nomm(nvbo);
                return;
        }

        if (nvbo->map) {
                munmap(nvbo->map, nvbo->size);
                nvbo->map = NULL;
        }

        req.handle = nvbo->handle;
        nvbo->handle = 0;
        ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}

static int
nouveau_bo_kalloc_nomm(struct nouveau_bo_priv *nvbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        struct drm_nouveau_mem_alloc req;
        int ret;

        if (nvbo->handle)
                return 0;

        if (!(nvbo->flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART)))
                nvbo->flags |= (NOUVEAU_BO_GART | NOUVEAU_BO_VRAM);

        req.size = nvbo->size;
        req.alignment = nvbo->align;
        req.flags = 0;
        if (nvbo->flags & NOUVEAU_BO_VRAM)
                req.flags |= NOUVEAU_MEM_FB;
        if (nvbo->flags & NOUVEAU_BO_GART)
                req.flags |= (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI);
        if (nvbo->flags & NOUVEAU_BO_TILED) {
                req.flags |= NOUVEAU_MEM_TILE;
                if (nvbo->flags & NOUVEAU_BO_ZTILE)
                        req.flags |= NOUVEAU_MEM_TILE_ZETA;
        }
        req.flags |= NOUVEAU_MEM_MAPPED;

        ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC,
                                  &req, sizeof(req));
        if (ret)
                return ret;

        nvbo->handle = req.map_handle;
        nvbo->size = req.size;
        nvbo->offset = req.offset;
        if (req.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
                nvbo->domain = NOUVEAU_BO_GART;
        else if (req.flags & NOUVEAU_MEM_FB)
                nvbo->domain = NOUVEAU_BO_VRAM;

        return 0;
}

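/*
 * Create a kernel object for the buffer.  With the kernel memory manager
 * this uses the GEM_NEW ioctl and may pass a channel hint; otherwise it
 * falls back to the legacy allocator above.  Pinned (fake) buffers are
 * left untouched.
 */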
static int
nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        struct drm_nouveau_gem_new req;
        int ret;

        if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
                return 0;

        if (!nvdev->mm_enabled)
                return nouveau_bo_kalloc_nomm(nvbo);

        req.channel_hint = chan ? chan->id : 0;
        req.size = nvbo->size;
        req.align = nvbo->align;

        req.domain = 0;
        if (nvbo->flags & NOUVEAU_BO_VRAM)
                req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
        if (nvbo->flags & NOUVEAU_BO_GART)
                req.domain |= NOUVEAU_GEM_DOMAIN_GART;
        if (nvbo->flags & NOUVEAU_BO_TILED) {
                req.domain |= NOUVEAU_GEM_DOMAIN_TILE;
                if (nvbo->flags & NOUVEAU_BO_ZTILE)
                        req.domain |= NOUVEAU_GEM_DOMAIN_TILE_ZETA;
        }
        if (!req.domain) {
                req.domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
                               NOUVEAU_GEM_DOMAIN_GART);
        }

        ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
                                  &req, sizeof(req));
        if (ret)
                return ret;

        nvbo->handle = nvbo->base.handle = req.handle;
        nvbo->size = req.size;
        nvbo->domain = req.domain;
        nvbo->offset = req.offset;

        return 0;
}

static int
nouveau_bo_kmap_nomm(struct nouveau_bo_priv *nvbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        int ret;

        ret = drmMap(nvdev->fd, nvbo->handle, nvbo->size, &nvbo->map);
        if (ret) {
                nvbo->map = NULL;
                return ret;
        }

        return 0;
}

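/*
 * Map the kernel object into the client's address space: via the GEM_MMAP
 * ioctl with the memory manager, or via drmMap() on the map handle
 * returned at allocation time without it.
 */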
static int
nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
        struct drm_nouveau_gem_mmap req;
        int ret;

        if (nvbo->map)
                return 0;

        if (!nvbo->handle)
                return -EINVAL;

        if (!nvdev->mm_enabled)
                return nouveau_bo_kmap_nomm(nvbo);

        req.handle = nvbo->handle;
        ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_MMAP,
                                  &req, sizeof(req));
        if (ret)
                return ret;

        nvbo->map = (void *)(unsigned long)req.vaddr;
        return 0;
}

int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
               int size, struct nouveau_bo **bo)
{
        struct nouveau_bo_priv *nvbo;
        int ret;

        if (!dev || !bo || *bo)
                return -EINVAL;

        nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
        if (!nvbo)
                return -ENOMEM;
        nvbo->base.device = dev;
        nvbo->base.size = size;

        nvbo->refcount = 1;
        /* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will
         * decide the buffer's already allocated when it's not.  The
         * call to nouveau_bo_pin() later will set this flag.
         */
        nvbo->flags = (flags & ~NOUVEAU_BO_PIN);
        nvbo->size = size;
        nvbo->align = align;

        /*XXX: murder me violently */
        if (flags & NOUVEAU_BO_TILED) {
                nvbo->base.tiled = 1;
                if (flags & NOUVEAU_BO_ZTILE)
                        nvbo->base.tiled |= 2;
        }

        if (flags & NOUVEAU_BO_PIN) {
                ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
                if (ret) {
                        nouveau_bo_ref(NULL, (void *)nvbo);
                        return ret;
                }
        }

        *bo = &nvbo->base;
        return 0;
}

int
nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
                struct nouveau_bo **bo)
{
        struct nouveau_bo_priv *nvbo;
        int ret;

        ret = nouveau_bo_new(dev, 0, 0, size, bo);
        if (ret)
                return ret;
        nvbo = nouveau_bo(*bo);

        nvbo->sysmem = ptr;
        nvbo->user = 1;
        return 0;
}
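
/*
 * Example (illustrative sketch, not part of the original file): wrapping
 * caller-owned memory in a buffer object.  The memory must outlive the BO;
 * nouveau_bo_ufree() deliberately leaves user pointers alone.
 *
 *      uint32_t *verts = malloc(4096);
 *      struct nouveau_bo *bo = NULL;
 *
 *      if (verts && nouveau_bo_user(dev, verts, 4096, &bo) == 0) {
 *              ... hand bo to the pushbuf machinery ...
 *              nouveau_bo_ref(NULL, &bo);
 *      }
 *      free(verts);
 */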

int
nouveau_bo_fake(struct nouveau_device *dev, uint64_t offset, uint32_t flags,
                uint32_t size, void *map, struct nouveau_bo **bo)
{
        struct nouveau_bo_priv *nvbo;
        int ret;

        ret = nouveau_bo_new(dev, flags & ~NOUVEAU_BO_PIN, 0, size, bo);
        if (ret)
                return ret;
        nvbo = nouveau_bo(*bo);

        nvbo->flags = flags | NOUVEAU_BO_PIN;
        nvbo->domain = (flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));
        nvbo->offset = offset;
        nvbo->size = nvbo->base.size = size;
        nvbo->map = map;
        nvbo->base.flags = nvbo->flags;
        nvbo->base.offset = nvbo->offset;
        return 0;
}

int
nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
{
        struct nouveau_device_priv *nvdev;
        struct nouveau_bo_priv *nvbo;
        int ret;

        /* Validate the arguments before dereferencing them. */
        if (!bo || !handle)
                return -EINVAL;

        nvdev = nouveau_device(bo->device);
        nvbo = nouveau_bo(bo);

        if (!nvbo->global_handle) {
                struct drm_gem_flink req;

                ret = nouveau_bo_kalloc(nvbo, NULL);
                if (ret)
                        return ret;

                if (nvdev->mm_enabled) {
                        req.handle = nvbo->handle;
                        ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
                        if (ret) {
                                nouveau_bo_kfree(nvbo);
                                return ret;
                        }

                        nvbo->global_handle = req.name;
                } else {
                        nvbo->global_handle = nvbo->offset;
                }
        }

        *handle = nvbo->global_handle;
        return 0;
}

int
nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
                      struct nouveau_bo **bo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_bo_priv *nvbo;
        struct drm_gem_open req;
        int ret;

        ret = nouveau_bo_new(dev, 0, 0, 0, bo);
        if (ret)
                return ret;
        nvbo = nouveau_bo(*bo);

        if (!nvdev->mm_enabled) {
                nvbo->handle = 0;
                nvbo->offset = handle;
                nvbo->domain = NOUVEAU_BO_VRAM;
                nvbo->flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_PIN;
                nvbo->base.offset = nvbo->offset;
                nvbo->base.flags = nvbo->flags;
        } else {
                req.name = handle;
                ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
                if (ret) {
                        nouveau_bo_ref(NULL, bo);
                        return ret;
                }

                nvbo->size = req.size;
                nvbo->handle = req.handle;
        }

        nvbo->base.handle = nvbo->handle;
        return 0;
}
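
/*
 * Example (illustrative sketch, not part of the original file): sharing a
 * buffer between two DRM clients.  The exporter publishes a global name
 * with nouveau_bo_handle_get(); the importer opens it by name with
 * nouveau_bo_handle_ref().  "dev_b" stands for a second, independently
 * opened nouveau_device.
 *
 *      uint32_t name;
 *      struct nouveau_bo *shared = NULL;
 *
 *      if (nouveau_bo_handle_get(bo, &name) == 0 &&
 *          nouveau_bo_handle_ref(dev_b, name, &shared) == 0) {
 *              ... use shared ...
 *              nouveau_bo_ref(NULL, &shared);
 *      }
 */
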
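/*
 * Buffer destruction.  Teardown is funnelled through a callback so that,
 * on the legacy (no memory manager) path, a buffer with an outstanding
 * fence can stay alive until the GPU has signalled it.
 */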
static void
nouveau_bo_del_cb(void *priv)
{
        struct nouveau_bo_priv *nvbo = priv;

        nouveau_fence_ref(NULL, &nvbo->fence);
        nouveau_fence_ref(NULL, &nvbo->wr_fence);
        nouveau_bo_kfree(nvbo);
        free(nvbo);
}

static void
nouveau_bo_del(struct nouveau_bo **bo)
{
        struct nouveau_bo_priv *nvbo;

        if (!bo || !*bo)
                return;
        nvbo = nouveau_bo(*bo);
        *bo = NULL;

        if (--nvbo->refcount)
                return;

        if (nvbo->pending) {
                nvbo->pending = NULL;
                nouveau_pushbuf_flush(nvbo->pending_channel, 0);
        }

        nouveau_bo_ufree(nvbo);

        if (!nouveau_device(nvbo->base.device)->mm_enabled && nvbo->fence) {
                nouveau_fence_flush(nvbo->fence->channel);
                if (nouveau_fence(nvbo->fence)->signalled) {
                        nouveau_bo_del_cb(nvbo);
                } else {
                        nouveau_fence_signal_cb(nvbo->fence,
                                                nouveau_bo_del_cb, nvbo);
                }
        } else {
                nouveau_bo_del_cb(nvbo);
        }
}

int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
{
        if (!pbo)
                return -EINVAL;

        if (ref)
                nouveau_bo(ref)->refcount++;

        if (*pbo)
                nouveau_bo_del(pbo);

        *pbo = ref;
        return 0;
}

static int
nouveau_bo_wait_nomm(struct nouveau_bo *bo, int cpu_write)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        int ret = 0;

        if (cpu_write)
                ret = nouveau_fence_wait(&nvbo->fence);
        else
                ret = nouveau_fence_wait(&nvbo->wr_fence);
        if (ret)
                return ret;

        nvbo->write_marker = 0;
        return 0;
}

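/*
 * Block until the GPU is done with the buffer.  A pushbuf that still
 * references the buffer is flushed first; with the kernel memory manager
 * the actual wait is delegated to the CPU_PREP ioctl.
 */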
static int
nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_cpu_prep req;
        int ret;

        if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
                return 0;

        if (nvbo->pending &&
            (nvbo->pending->write_domains || cpu_write)) {
                nvbo->pending = NULL;
                nouveau_pushbuf_flush(nvbo->pending_channel, 0);
        }

        if (!nvdev->mm_enabled)
                return nouveau_bo_wait_nomm(bo, cpu_write);

        req.handle = nvbo->handle;
        ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
                              &req, sizeof(req));
        if (ret)
                return ret;

        nvbo->write_marker = 0;
        return 0;
}

int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        int ret;

        if (!nvbo || bo->map)
                return -EINVAL;

        if (!nouveau_bo_allocated(nvbo)) {
                if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
                        ret = nouveau_bo_kalloc(nvbo, NULL);
                        if (ret)
                                return ret;
                }

                if (!nouveau_bo_allocated(nvbo)) {
                        ret = nouveau_bo_ualloc(nvbo);
                        if (ret)
                                return ret;
                }
        }

        if (nvbo->sysmem) {
                bo->map = nvbo->sysmem;
        } else {
                ret = nouveau_bo_kmap(nvbo);
                if (ret)
                        return ret;

                ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR));
                if (ret)
                        return ret;

                bo->map = nvbo->map;
        }

        return 0;
}

void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

        if (nvdev->mm_enabled && bo->map && !nvbo->sysmem) {
                struct drm_nouveau_gem_cpu_fini req;

                req.handle = nvbo->handle;
                drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
                                &req, sizeof(req));
        }

        bo->map = NULL;
}
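
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * allocate/map/write/unmap/release cycle, here for a 64KiB GART buffer.
 * "dev" is assumed to come from the device-open entry point.
 *
 *      struct nouveau_bo *bo = NULL;
 *
 *      if (nouveau_bo_new(dev, NOUVEAU_BO_GART, 0, 64 * 1024, &bo) == 0) {
 *              if (nouveau_bo_map(bo, NOUVEAU_BO_WR) == 0) {
 *                      memset(bo->map, 0, bo->size);
 *                      nouveau_bo_unmap(bo);
 *              }
 *              nouveau_bo_ref(NULL, &bo);
 *      }
 */
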
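/*
 * Legacy-path migration: satisfy a placement request by allocating a new
 * kernel object in the requested domain, copying the contents across, and
 * swapping the two buffers' private state so callers keep their pointer.
 */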
int
nouveau_bo_validate_nomm(struct nouveau_bo_priv *nvbo, uint32_t flags)
{
        struct nouveau_bo *new = NULL;
        uint32_t t_handle, t_domain, t_offset, t_size;
        void *t_map;
        int ret;

        if ((flags & NOUVEAU_BO_VRAM) && nvbo->domain == NOUVEAU_BO_VRAM)
                return 0;
        if ((flags & NOUVEAU_BO_GART) && nvbo->domain == NOUVEAU_BO_GART)
                return 0;
        assert(flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));

        /* Keep tiling info */
        flags |= (nvbo->flags & (NOUVEAU_BO_TILED|NOUVEAU_BO_ZTILE));

        ret = nouveau_bo_new(nvbo->base.device, flags, 0, nvbo->size, &new);
        if (ret)
                return ret;

        ret = nouveau_bo_kalloc(nouveau_bo(new), NULL);
        if (ret) {
                nouveau_bo_ref(NULL, &new);
                return ret;
        }

        if (nvbo->handle || nvbo->sysmem) {
                nouveau_bo_kmap(nouveau_bo(new));

                if (!nvbo->base.map) {
                        nouveau_bo_map(&nvbo->base, NOUVEAU_BO_RD);
                        memcpy(nouveau_bo(new)->map, nvbo->base.map,
                               nvbo->base.size);
                        nouveau_bo_unmap(&nvbo->base);
                } else {
                        memcpy(nouveau_bo(new)->map, nvbo->base.map,
                               nvbo->base.size);
                }
        }

        t_handle = nvbo->handle;
        t_domain = nvbo->domain;
        t_offset = nvbo->offset;
        t_size = nvbo->size;
        t_map = nvbo->map;

        nvbo->handle = nouveau_bo(new)->handle;
        nvbo->domain = nouveau_bo(new)->domain;
        nvbo->offset = nouveau_bo(new)->offset;
        nvbo->size = nouveau_bo(new)->size;
        nvbo->map = nouveau_bo(new)->map;

        nouveau_bo(new)->handle = t_handle;
        nouveau_bo(new)->domain = t_domain;
        nouveau_bo(new)->offset = t_offset;
        nouveau_bo(new)->size = t_size;
        nouveau_bo(new)->map = t_map;

        nouveau_bo_ref(NULL, &new);
        return 0;
}

static int
nouveau_bo_pin_nomm(struct nouveau_bo *bo, uint32_t flags)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        int ret;

        if (!nvbo->handle) {
                if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
                        return -EINVAL;

                ret = nouveau_bo_validate_nomm(nvbo, flags & ~NOUVEAU_BO_PIN);
                if (ret)
                        return ret;
        }

        nvbo->pinned = 1;

        /* Fill in public nouveau_bo members */
        bo->flags = nvbo->domain;
        bo->offset = nvbo->offset;

        return 0;
}

int
nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_pin req;
        int ret;

        if (nvbo->pinned)
                return 0;

        if (!nvdev->mm_enabled)
                return nouveau_bo_pin_nomm(bo, flags);

        /* Ensure we have a kernel object... */
        if (!nvbo->handle) {
                if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
                        return -EINVAL;
                nvbo->flags = flags;

                ret = nouveau_bo_kalloc(nvbo, NULL);
                if (ret)
                        return ret;
        }

        /* Now force it to stay put :) */
        req.handle = nvbo->handle;
        req.domain = 0;
        if (nvbo->flags & NOUVEAU_BO_VRAM)
                req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
        if (nvbo->flags & NOUVEAU_BO_GART)
                req.domain |= NOUVEAU_GEM_DOMAIN_GART;

        ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
                                  sizeof(struct drm_nouveau_gem_pin));
        if (ret)
                return ret;

        nvbo->offset = req.offset;
        nvbo->domain = req.domain;
        nvbo->pinned = 1;
        nvbo->flags |= NOUVEAU_BO_PIN;

        /* Fill in public nouveau_bo members */
        if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
                bo->flags = NOUVEAU_BO_VRAM;
        if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
                bo->flags = NOUVEAU_BO_GART;
        bo->offset = nvbo->offset;

        return 0;
}

void
nouveau_bo_unpin(struct nouveau_bo *bo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_unpin req;

        if (!nvbo->pinned)
                return;

        if (nvdev->mm_enabled) {
                req.handle = nvbo->handle;
                drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN,
                                &req, sizeof(req));
        }

        nvbo->pinned = bo->offset = bo->flags = 0;
}
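
/*
 * Example (illustrative sketch, not part of the original file): pinning a
 * scanout-style buffer at a fixed VRAM offset while it is displayed.
 * program_display_base() is a hypothetical helper, not part of this API.
 *
 *      if (nouveau_bo_pin(bo, NOUVEAU_BO_VRAM) == 0) {
 *              program_display_base(bo->offset);
 *              ...
 *              nouveau_bo_unpin(bo);
 *      }
 */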

int
nouveau_bo_tile(struct nouveau_bo *bo, uint32_t flags, uint32_t delta,
                uint32_t size)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        uint32_t kern_flags = 0;
        int ret = 0;

        if (flags & NOUVEAU_BO_TILED) {
                kern_flags |= NOUVEAU_MEM_TILE;
                if (flags & NOUVEAU_BO_ZTILE)
                        kern_flags |= NOUVEAU_MEM_TILE_ZETA;
        }

        if (nvdev->mm_enabled) {
                struct drm_nouveau_gem_tile req;

                req.handle = nvbo->handle;
                req.delta = delta;
                req.size = size;
                req.flags = kern_flags;
                ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_TILE,
                                      &req, sizeof(req));
        } else {
                struct drm_nouveau_mem_tile req;

                req.offset = nvbo->offset;
                req.delta = delta;
                req.size = size;
                req.flags = kern_flags;
                if (flags & NOUVEAU_BO_VRAM)
                        req.flags |= NOUVEAU_MEM_FB;
                if (flags & NOUVEAU_BO_GART)
                        req.flags |= NOUVEAU_MEM_AGP;

                ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_TILE,
                                      &req, sizeof(req));
        }

        return ret;
}

int
nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

        if (!nvdev->mm_enabled) {
                struct nouveau_fence *fence;

                if (nvbo->pending && (nvbo->pending->write_domains ||
                                      (access & NOUVEAU_BO_WR)))
                        return 1;

                if (access & NOUVEAU_BO_WR)
                        fence = nvbo->fence;
                else
                        fence = nvbo->wr_fence;
                return !nouveau_fence(fence)->signalled;
        }

        /* With the kernel memory manager, conservatively report busy. */
        return 1;
}

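/*
 * Register a buffer with a channel's pushbuf and return its relocation
 * entry.  Buffers still backed only by system memory are promoted to
 * kernel objects here, with their contents copied in before submission.
 */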
struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
{
        struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_pushbuf_bo *pbbo;
        struct nouveau_bo *ref = NULL;
        int ret;

        if (nvbo->pending)
                return nvbo->pending;

        if (!nvbo->handle) {
                ret = nouveau_bo_kalloc(nvbo, chan);
                if (ret)
                        return NULL;

                if (nvbo->sysmem) {
                        void *sysmem_tmp = nvbo->sysmem;

                        /* Temporarily hide the system-memory backing so
                         * nouveau_bo_map() maps the new kernel object
                         * instead of handing back the sysmem pointer.
                         */
                        nvbo->sysmem = NULL;
                        ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
                        if (ret)
                                return NULL;
                        nvbo->sysmem = sysmem_tmp;

                        memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
                        nouveau_bo_unmap(bo);
                        nouveau_bo_ufree(nvbo);
                }
        }

        if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
                return NULL;
        pbbo = nvpb->buffers + nvpb->nr_buffers++;
        nvbo->pending = pbbo;
        nvbo->pending_channel = chan;

        nouveau_bo_ref(bo, &ref);
        pbbo->user_priv = (uint64_t)(unsigned long)ref;
        pbbo->handle = nvbo->handle;
        pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
        pbbo->read_domains = 0;
        pbbo->write_domains = 0;
        pbbo->presumed_domain = nvbo->domain;
        pbbo->presumed_offset = nvbo->offset;
        pbbo->presumed_ok = 1;
        return pbbo;
}
