FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/drm_bufs.c


    1 /**
    2  * \file drm_bufs.c
    3  * Generic buffer template
    4  *
    5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
    6  * \author Gareth Hughes <gareth@valinux.com>
    7  */
    8 
    9 /*
   10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
   11  *
   12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
   13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
   14  * All Rights Reserved.
   15  *
   16  * Permission is hereby granted, free of charge, to any person obtaining a
   17  * copy of this software and associated documentation files (the "Software"),
   18  * to deal in the Software without restriction, including without limitation
   19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   20  * and/or sell copies of the Software, and to permit persons to whom the
   21  * Software is furnished to do so, subject to the following conditions:
   22  *
   23  * The above copyright notice and this permission notice (including the next
   24  * paragraph) shall be included in all copies or substantial portions of the
   25  * Software.
   26  *
   27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
   31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
   32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
   33  * OTHER DEALINGS IN THE SOFTWARE.
   34  */
   35 
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD$");
   38 
   39 #include <sys/param.h>
   40 #include <sys/shm.h>
   41 
   42 #include <dev/pci/pcireg.h>
   43 
   44 #include <dev/drm2/drmP.h>
   45 
   46 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
   47  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
   48  * address for accessing them.  Cleaned up at unload.
   49  */
   50 static int drm_alloc_resource(struct drm_device *dev, int resource)
   51 {
   52         struct resource *res;
   53         int rid;
   54 
   55         if (resource >= DRM_MAX_PCI_RESOURCE) {
   56                 DRM_ERROR("Resource %d too large\n", resource);
   57                 return 1;
   58         }
   59 
   60         if (dev->pcir[resource] != NULL) {
   61                 return 0;
   62         }
   63 
   64         rid = PCIR_BAR(resource);
   65         res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
   66             RF_SHAREABLE);
   67         if (res == NULL) {
   68                 DRM_ERROR("Couldn't find resource 0x%x\n", resource);
   69                 return 1;
   70         }
   71 
   72         if (dev->pcir[resource] == NULL) {
   73                 dev->pcirid[resource] = rid;
   74                 dev->pcir[resource] = res;
   75         }
   76 
   77         return 0;
   78 }
   79 
   80 unsigned long drm_get_resource_start(struct drm_device *dev,
   81                                      unsigned int resource)
   82 {
   83         unsigned long start;
   84 
   85         mtx_lock(&dev->pcir_lock);
   86 
    87         if (drm_alloc_resource(dev, resource) != 0) {
    88                 mtx_unlock(&dev->pcir_lock);
    89                 return 0;
    90         }
    91         start = rman_get_start(dev->pcir[resource]);
   92         mtx_unlock(&dev->pcir_lock);
   93 
   94         return (start);
   95 }
   96 
   97 unsigned long drm_get_resource_len(struct drm_device *dev,
   98                                    unsigned int resource)
   99 {
  100         unsigned long len;
  101 
  102         mtx_lock(&dev->pcir_lock);
  103 
   104         if (drm_alloc_resource(dev, resource) != 0) {
   105                 mtx_unlock(&dev->pcir_lock);
   106                 return 0;
   107         }
   108         len = rman_get_size(dev->pcir[resource]);
  109         mtx_unlock(&dev->pcir_lock);
  110 
  111         return (len);
  112 }
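
/*
 * [Editor's note, not in the original file] A minimal usage sketch of the
 * two helpers above, as a driver attach path might use them to locate a
 * BAR before mapping it.  The BAR index (1) and the error handling are
 * illustrative assumptions.
 */
#if 0
	unsigned long fb_base = drm_get_resource_start(dev, 1);
	unsigned long fb_len  = drm_get_resource_len(dev, 1);

	if (fb_base == 0 || fb_len == 0)
		return -ENXIO;	/* BAR 1 absent or allocation failed */
#endif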
  113 
  114 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
  115                                                   struct drm_local_map *map)
  116 {
  117         struct drm_map_list *entry;
  118         list_for_each_entry(entry, &dev->maplist, head) {
  119                 /*
  120                  * Because the kernel-userspace ABI is fixed at a 32-bit offset
  121                  * while PCI resources may live above that, we only compare the
  122                  * lower 32 bits of the map offset for maps of type
   123                  * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
   124                  * It is assumed that if a driver has more than one resource
   125                  * of each type, the lower 32 bits differ.
  126                  */
  127                 if (!entry->map ||
  128                     map->type != entry->map->type ||
  129                     entry->master != dev->primary->master)
  130                         continue;
  131                 switch (map->type) {
  132                 case _DRM_SHM:
  133                         if (map->flags != _DRM_CONTAINS_LOCK)
  134                                 break;
  135                         return entry;
  136                 case _DRM_REGISTERS:
  137                 case _DRM_FRAME_BUFFER:
  138                         if ((entry->map->offset & 0xffffffff) ==
  139                             (map->offset & 0xffffffff))
  140                                 return entry;
  141                 default: /* Make gcc happy */
  142                         ;
  143                 }
  144                 if (entry->map->offset == map->offset)
  145                         return entry;
  146         }
  147 
  148         return NULL;
  149 }
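
/*
 * [Editor's note, not in the original file] A worked instance of the
 * low-32-bit comparison above: a 64-bit PCI BAR at 0x2_0000_0000 still
 * matches the truncated 32-bit offset that the legacy ABI can carry.
 */
#if 0
	resource_size_t bar = 0x200000000ULL;	/* where the BAR really lives */
	unsigned long token = 0x00000000UL;	/* what the 32-bit ABI carries */

	if ((bar & 0xffffffff) == (token & 0xffffffff))
		;	/* matches: treated as the same map */
#endif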
  150 
  151 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
  152                           unsigned long user_token, int hashed_handle, int shm)
  153 {
  154         int use_hashed_handle, shift;
  155         unsigned long add;
  156 
  157 #if (BITS_PER_LONG == 64)
  158         use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
  159 #elif (BITS_PER_LONG == 32)
  160         use_hashed_handle = hashed_handle;
  161 #else
  162 #error Unsupported long size. Neither 64 nor 32 bits.
  163 #endif
  164 
  165         if (!use_hashed_handle) {
  166                 int ret;
  167                 hash->key = user_token >> PAGE_SHIFT;
  168                 ret = drm_ht_insert_item(&dev->map_hash, hash);
  169                 if (ret != -EINVAL)
  170                         return ret;
  171         }
  172 
  173         shift = 0;
  174         add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
  175         if (shm && (SHMLBA > PAGE_SIZE)) {
  176                 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
  177 
  178                 /* For shared memory, we have to preserve the SHMLBA
  179                  * bits of the eventual vma->vm_pgoff value during
  180                  * mmap().  Otherwise we run into cache aliasing problems
  181                  * on some platforms.  On these platforms, the pgoff of
  182                  * a mmap() request is used to pick a suitable virtual
  183                  * address for the mmap() region such that it will not
  184                  * cause cache aliasing problems.
  185                  *
  186                  * Therefore, make sure the SHMLBA relevant bits of the
  187                  * hash value we use are equal to those in the original
  188                  * kernel virtual address.
  189                  */
  190                 shift = bits;
  191                 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
  192         }
  193 
  194         return drm_ht_just_insert_please(&dev->map_hash, hash,
  195                                          user_token, 32 - PAGE_SHIFT - 3,
  196                                          shift, add);
  197 }
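
/*
 * [Editor's note, not in the original file] The SHMLBA arithmetic above
 * with concrete numbers, assuming PAGE_SIZE = 4 KiB (PAGE_SHIFT = 12)
 * and SHMLBA = 16 KiB, as on some cache-aliasing platforms:
 *
 *	bits = ilog2(16384 >> 12) + 1 = ilog2(4) + 1 = 3
 *
 * so the low 3 page-offset bits of the kernel virtual address survive
 * into the hashed handle, and mmap() can later pick a user address whose
 * low SHMLBA bits agree with the kernel mapping, avoiding cache aliasing.
 */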
  198 
  199 /**
  200  * Core function to create a range of memory available for mapping by a
  201  * non-root process.
  202  *
  203  * Adjusts the memory offset to its absolute value according to the mapping
   204  * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
   205  * applicable and if supported by the kernel.
  206  */
  207 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
  208                            unsigned int size, enum drm_map_type type,
  209                            enum drm_map_flags flags,
  210                            struct drm_map_list ** maplist)
  211 {
  212         struct drm_local_map *map;
  213         struct drm_map_list *list;
  214         drm_dma_handle_t *dmah;
  215         unsigned long user_token;
  216         int ret;
  217         int align;
  218 
  219         map = malloc(sizeof(*map), DRM_MEM_MAPS, M_NOWAIT);
  220         if (!map)
  221                 return -ENOMEM;
  222 
  223         map->offset = offset;
  224         map->size = size;
  225         map->flags = flags;
  226         map->type = type;
  227 
   228         /* Only allow shared memory to be removable, since we only keep
   229          * enough bookkeeping information about shared memory to allow for
   230          * removal when processes fork.
  231          */
  232         if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
  233                 free(map, DRM_MEM_MAPS);
  234                 return -EINVAL;
  235         }
  236         DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
  237                   (unsigned long long)map->offset, map->size, map->type);
  238 
   239         /* Page-align _DRM_SHM maps.  They are allocated here, so this opens
   240          * no security hole, and it works around various broken drivers that
   241          * use a non-aligned quantity to map the SAREA. --BenH
  242          */
  243         if (map->type == _DRM_SHM)
  244                 map->size = PAGE_ALIGN(map->size);
  245 
  246         /*
   247          * FreeBSD port note: FreeBSD's PAGE_MASK is the inverse of
   248          * Linux's.  That is why the test below does not invert the
   249          * constant.
  250          */
  251         if ((map->offset & ((resource_size_t)PAGE_MASK)) || (map->size & (PAGE_MASK))) {
  252                 free(map, DRM_MEM_MAPS);
  253                 return -EINVAL;
  254         }
  255         map->mtrr = -1;
  256         map->handle = NULL;
  257 
  258         switch (map->type) {
  259         case _DRM_REGISTERS:
  260         case _DRM_FRAME_BUFFER:
  261 #ifdef __linux__
  262 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
  263                 if (map->offset + (map->size-1) < map->offset ||
  264                     map->offset < virt_to_phys(high_memory)) {
  265                         kfree(map);
  266                         return -EINVAL;
  267                 }
  268 #endif
  269 #endif
  270                 /* Some drivers preinitialize some maps, without the X Server
  271                  * needing to be aware of it.  Therefore, we just return success
  272                  * when the server tries to create a duplicate map.
  273                  */
  274                 list = drm_find_matching_map(dev, map);
  275                 if (list != NULL) {
  276                         if (list->map->size != map->size) {
  277                                 DRM_DEBUG("Matching maps of type %d with "
  278                                           "mismatched sizes, (%ld vs %ld)\n",
  279                                           map->type, map->size,
  280                                           list->map->size);
  281                                 list->map->size = map->size;
  282                         }
  283 
  284                         free(map, DRM_MEM_MAPS);
  285                         *maplist = list;
  286                         return 0;
  287                 }
  288 
  289                 if (drm_core_has_MTRR(dev)) {
  290                         if (map->type == _DRM_FRAME_BUFFER ||
  291                             (map->flags & _DRM_WRITE_COMBINING)) {
  292                                 if (drm_mtrr_add(
  293                                     map->offset, map->size,
  294                                     DRM_MTRR_WC) == 0)
  295                                         map->mtrr = 1;
  296                         }
  297                 }
  298                 if (map->type == _DRM_REGISTERS) {
  299                         drm_core_ioremap(map, dev);
  300                         if (!map->handle) {
  301                                 free(map, DRM_MEM_MAPS);
  302                                 return -ENOMEM;
  303                         }
  304                 }
  305 
  306                 break;
  307         case _DRM_SHM:
  308                 list = drm_find_matching_map(dev, map);
  309                 if (list != NULL) {
   310                 if (list->map->size != map->size) {
  311                                 DRM_DEBUG("Matching maps of type %d with "
  312                                           "mismatched sizes, (%ld vs %ld)\n",
  313                                           map->type, map->size, list->map->size);
  314                                 list->map->size = map->size;
  315                         }
  316 
  317                         free(map, DRM_MEM_MAPS);
  318                         *maplist = list;
  319                         return 0;
  320                 }
  321                 map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
  322                 DRM_DEBUG("%lu %d %p\n",
  323                           map->size, drm_order(map->size), map->handle);
  324                 if (!map->handle) {
  325                         free(map, DRM_MEM_MAPS);
  326                         return -ENOMEM;
  327                 }
  328                 map->offset = (unsigned long)map->handle;
  329                 if (map->flags & _DRM_CONTAINS_LOCK) {
  330                         /* Prevent a 2nd X Server from creating a 2nd lock */
  331                         if (dev->primary->master->lock.hw_lock != NULL) {
  332                                 free(map->handle, DRM_MEM_MAPS);
  333                                 free(map, DRM_MEM_MAPS);
  334                                 return -EBUSY;
  335                         }
  336                         dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
  337                 }
  338                 break;
  339         case _DRM_AGP: {
  340                 struct drm_agp_mem *entry;
  341                 int valid = 0;
  342 
  343                 if (!drm_core_has_AGP(dev)) {
  344                         free(map, DRM_MEM_MAPS);
  345                         return -EINVAL;
  346                 }
  347 #ifdef __linux__
  348 #ifdef __alpha__
  349                 map->offset += dev->hose->mem_space->start;
  350 #endif
  351 #endif
  352                 /* In some cases (i810 driver), user space may have already
  353                  * added the AGP base itself, because dev->agp->base previously
  354                  * only got set during AGP enable.  So, only add the base
  355                  * address if the map's offset isn't already within the
  356                  * aperture.
  357                  */
  358                 if (map->offset < dev->agp->base ||
  359                     map->offset > dev->agp->base +
  360                     dev->agp->agp_info.ai_aperture_size * 1024 * 1024 - 1) {
  361                         map->offset += dev->agp->base;
  362                 }
  363                 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
  364 
  365                 /* This assumes the DRM is in total control of AGP space.
   366                  * That is not always the case, as AGP can be in the control
   367                  * of user space (e.g. the i810 driver).  Then this loop gets
   368                  * skipped, and we double check that dev->agp->memory is
   369                  * actually set, as well as being invalid, before EPERM'ing.
  370                  */
  371                 list_for_each_entry(entry, &dev->agp->memory, head) {
  372                         if ((map->offset >= entry->bound) &&
  373                             (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
  374                                 valid = 1;
  375                                 break;
  376                         }
  377                 }
  378                 if (!list_empty(&dev->agp->memory) && !valid) {
  379                         free(map, DRM_MEM_MAPS);
  380                         return -EPERM;
  381                 }
  382                 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
  383                           (unsigned long long)map->offset, map->size);
  384 
  385                 break;
  386         }
  387         case _DRM_GEM:
  388                 DRM_ERROR("tried to addmap GEM object\n");
  389                 break;
  390         case _DRM_SCATTER_GATHER:
  391                 if (!dev->sg) {
  392                         free(map, DRM_MEM_MAPS);
  393                         return -EINVAL;
  394                 }
  395                 map->handle = (void *)(dev->sg->vaddr + offset);
  396                 map->offset += dev->sg->vaddr;
  397                 break;
  398         case _DRM_CONSISTENT:
   399                 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
   400                  * As we're limiting the address to 2^32-1 (or less),
   401                  * casting it down to 32 bits is no problem, but we
   402                  * need to point to a 64-bit variable first. */
  403                 align = map->size;
  404                 if ((align & (align - 1)) != 0)
  405                         align = PAGE_SIZE;
  406                 dmah = drm_pci_alloc(dev, map->size, align, BUS_SPACE_MAXADDR);
  407                 if (!dmah) {
  408                         free(map, DRM_MEM_MAPS);
  409                         return -ENOMEM;
  410                 }
  411                 map->handle = dmah->vaddr;
  412                 map->offset = dmah->busaddr;
  413                 map->dmah = dmah;
  414                 break;
  415         default:
  416                 free(map, DRM_MEM_MAPS);
  417                 return -EINVAL;
  418         }
  419 
  420         list = malloc(sizeof(*list), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
  421         if (!list) {
  422                 if (map->type == _DRM_REGISTERS)
  423                         drm_core_ioremapfree(map, dev);
  424                 free(map, DRM_MEM_MAPS);
  425                 return -EINVAL;
  426         }
  427         list->map = map;
  428 
  429         DRM_LOCK(dev);
  430         list_add(&list->head, &dev->maplist);
  431 
  432         /* Assign a 32-bit handle */
   433         /* We do it here so that the DRM lock protects the increment */
  434         user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
  435                 map->offset;
  436         ret = drm_map_handle(dev, &list->hash, user_token, 0,
  437                              (map->type == _DRM_SHM));
  438         if (ret) {
  439                 if (map->type == _DRM_REGISTERS)
  440                         drm_core_ioremapfree(map, dev);
  441                 free(map, DRM_MEM_MAPS);
  442                 free(list, DRM_MEM_MAPS);
  443                 DRM_UNLOCK(dev);
  444                 return ret;
  445         }
  446 
  447         list->user_token = list->hash.key << PAGE_SHIFT;
  448         DRM_UNLOCK(dev);
  449 
  450         if (!(map->flags & _DRM_DRIVER))
  451                 list->master = dev->primary->master;
  452         *maplist = list;
  453         return 0;
   454 }
  455 
  456 int drm_addmap(struct drm_device * dev, resource_size_t offset,
  457                unsigned int size, enum drm_map_type type,
  458                enum drm_map_flags flags, struct drm_local_map ** map_ptr)
  459 {
  460         struct drm_map_list *list;
  461         int rc;
  462 
  463         rc = drm_addmap_core(dev, offset, size, type, flags, &list);
  464         if (!rc)
  465                 *map_ptr = list->map;
  466         return rc;
  467 }
  468 
  469 EXPORT_SYMBOL(drm_addmap);
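
/*
 * [Editor's note, not in the original file] A sketch of typical in-kernel
 * use of drm_addmap()/drm_rmmap(), assuming the registers live in BAR 0;
 * the flag choice and error handling are illustrative.
 */
#if 0
	struct drm_local_map *regs;
	int err;

	err = drm_addmap(dev, drm_get_resource_start(dev, 0),
	    drm_get_resource_len(dev, 0), _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &regs);
	if (err != 0)
		return err;
	/* ... access registers through regs->handle ... */
	drm_rmmap(dev, regs);
#endif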
  470 
  471 /**
  472  * Ioctl to specify a range of memory that is available for mapping by a
  473  * non-root process.
  474  *
  475  * \param inode device inode.
  476  * \param file_priv DRM file private.
  477  * \param cmd command.
  478  * \param arg pointer to a drm_map structure.
  479  * \return zero on success or a negative value on error.
  480  *
  481  */
  482 int drm_addmap_ioctl(struct drm_device *dev, void *data,
  483                      struct drm_file *file_priv)
  484 {
  485         struct drm_map *map = data;
  486         struct drm_map_list *maplist;
  487         int err;
  488 
  489         if (!(DRM_SUSER(DRM_CURPROC) || map->type == _DRM_AGP || map->type == _DRM_SHM))
  490                 return -EPERM;
  491 
  492         err = drm_addmap_core(dev, map->offset, map->size, map->type,
  493                               map->flags, &maplist);
  494 
  495         if (err)
  496                 return err;
  497 
   498         /* Avoid a warning on 64-bit; the cast isn't nice, but the API is set, so it's too late to change. */
  499         map->handle = (void *)(unsigned long)maplist->user_token;
  500         return 0;
  501 }
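
/*
 * [Editor's note, not in the original file] The userspace side of this
 * ioctl, via libdrm's drmAddMap(); the returned handle is the user_token
 * described above and is later passed to mmap().  Size and flags are
 * illustrative.
 */
#if 0
	drm_handle_t handle;
	void *sarea;

	if (drmAddMap(fd, 0, 4096, DRM_SHM, DRM_CONTAINS_LOCK, &handle) == 0)
		sarea = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, handle);
#endif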
  502 
  503 /**
   504  * Remove a map private from the list and deallocate resources if the
   505  * mapping isn't in use.
   506  *
   507  * Searches for the map on drm_device::maplist, removes it from the list,
   508  * checks whether it is being used, and frees any associated resources
   509  * (such as MTRRs) if it is not.
  510  *
  511  * \sa drm_addmap
  512  */
  513 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
  514 {
  515         struct drm_map_list *r_list = NULL, *list_t;
  516         int found = 0;
  517         struct drm_master *master;
  518 
  519         /* Find the list entry for the map and remove it */
  520         list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
  521                 if (r_list->map == map) {
  522                         master = r_list->master;
  523                         list_del(&r_list->head);
  524                         drm_ht_remove_key(&dev->map_hash,
  525                                           r_list->user_token >> PAGE_SHIFT);
  526                         free(r_list, DRM_MEM_MAPS);
  527                         found = 1;
  528                         break;
  529                 }
  530         }
  531 
  532         if (!found)
  533                 return -EINVAL;
  534 
  535         switch (map->type) {
  536         case _DRM_REGISTERS:
  537                 drm_core_ioremapfree(map, dev);
  538                 /* FALLTHROUGH */
  539         case _DRM_FRAME_BUFFER:
  540                 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
  541                         int retcode;
  542                         retcode = drm_mtrr_del(map->mtrr, map->offset,
  543                             map->size, DRM_MTRR_WC);
  544                         DRM_DEBUG("mtrr_del=%d\n", retcode);
  545                 }
  546                 break;
  547         case _DRM_SHM:
  548                 free(map->handle, DRM_MEM_MAPS);
  549                 if (master) {
  550                         if (dev->sigdata.lock == master->lock.hw_lock)
  551                                 dev->sigdata.lock = NULL;
  552                         master->lock.hw_lock = NULL;   /* SHM removed */
  553                         master->lock.file_priv = NULL;
  554                         DRM_WAKEUP_INT((void *)&master->lock.lock_queue);
  555                 }
  556                 break;
  557         case _DRM_AGP:
  558         case _DRM_SCATTER_GATHER:
  559                 break;
  560         case _DRM_CONSISTENT:
  561                 drm_pci_free(dev, map->dmah);
  562                 break;
  563         case _DRM_GEM:
  564                 DRM_ERROR("tried to rmmap GEM object\n");
  565                 break;
  566         }
  567         free(map, DRM_MEM_MAPS);
  568 
  569         return 0;
  570 }
  571 EXPORT_SYMBOL(drm_rmmap_locked);
  572 
  573 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
  574 {
  575         int ret;
  576 
  577         DRM_LOCK(dev);
  578         ret = drm_rmmap_locked(dev, map);
  579         DRM_UNLOCK(dev);
  580 
  581         return ret;
  582 }
  583 EXPORT_SYMBOL(drm_rmmap);
  584 
  585 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  586  * the last close of the device, and this is necessary for cleanup when things
  587  * exit uncleanly.  Therefore, having userland manually remove mappings seems
  588  * like a pointless exercise since they're going away anyway.
  589  *
  590  * One use case might be after addmap is allowed for normal users for SHM and
  591  * gets used by drivers that the server doesn't need to care about.  This seems
  592  * unlikely.
  593  *
  594  * \param inode device inode.
  595  * \param file_priv DRM file private.
  596  * \param cmd command.
  597  * \param arg pointer to a struct drm_map structure.
  598  * \return zero on success or a negative value on error.
  599  */
  600 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
  601                     struct drm_file *file_priv)
  602 {
  603         struct drm_map *request = data;
  604         struct drm_local_map *map = NULL;
  605         struct drm_map_list *r_list;
  606         int ret;
  607 
  608         DRM_LOCK(dev);
  609         list_for_each_entry(r_list, &dev->maplist, head) {
  610                 if (r_list->map &&
  611                     r_list->user_token == (unsigned long)request->handle &&
  612                     r_list->map->flags & _DRM_REMOVABLE) {
  613                         map = r_list->map;
  614                         break;
  615                 }
  616         }
  617 
   618         /* The list has wrapped around to the head pointer, or it is empty:
   619          * either way, we didn't find anything.
  620          */
  621         if (list_empty(&dev->maplist) || !map) {
  622                 DRM_UNLOCK(dev);
  623                 return -EINVAL;
  624         }
  625 
  626         /* Register and framebuffer maps are permanent */
  627         if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
  628                 DRM_UNLOCK(dev);
  629                 return 0;
  630         }
  631 
  632         ret = drm_rmmap_locked(dev, map);
  633 
  634         DRM_UNLOCK(dev);
  635 
  636         return ret;
  637 }
  638 
  639 /**
  640  * Cleanup after an error on one of the addbufs() functions.
  641  *
  642  * \param dev DRM device.
  643  * \param entry buffer entry where the error occurred.
  644  *
  645  * Frees any pages and buffers associated with the given entry.
  646  */
  647 static void drm_cleanup_buf_error(struct drm_device * dev,
  648                                   struct drm_buf_entry * entry)
  649 {
  650         int i;
  651 
  652         if (entry->seg_count) {
  653                 for (i = 0; i < entry->seg_count; i++) {
  654                         if (entry->seglist[i]) {
  655                                 drm_pci_free(dev, entry->seglist[i]);
  656                         }
  657                 }
  658                 free(entry->seglist, DRM_MEM_SEGS);
  659 
  660                 entry->seg_count = 0;
  661         }
  662 
  663         if (entry->buf_count) {
  664                 for (i = 0; i < entry->buf_count; i++) {
  665                         free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
  666                 }
  667                 free(entry->buflist, DRM_MEM_BUFS);
  668 
  669                 entry->buf_count = 0;
  670         }
  671 }
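
/*
 * [Editor's note, not in the original file] On a partial allocation
 * failure, callers first set entry->buf_count/seg_count to the full
 * request ("Set count correctly so we free the proper amount") and then
 * call drm_cleanup_buf_error().  That works because buflist/seglist were
 * allocated with M_ZERO: the not-yet-filled tail holds NULL entries,
 * which the seglist check above skips and which free(9) tolerates for
 * the dev_private pointers.
 */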
  672 
  673 #if __OS_HAS_AGP
  674 /**
  675  * Add AGP buffers for DMA transfers.
  676  *
  677  * \param dev struct drm_device to which the buffers are to be added.
  678  * \param request pointer to a struct drm_buf_desc describing the request.
  679  * \return zero on success or a negative number on failure.
  680  *
   681  * After some sanity checks, creates a drm_buf structure for each buffer and
  682  * reallocates the buffer list of the same size order to accommodate the new
  683  * buffers.
  684  */
  685 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
  686 {
  687         struct drm_device_dma *dma = dev->dma;
  688         struct drm_buf_entry *entry;
  689         struct drm_agp_mem *agp_entry;
  690         struct drm_buf *buf;
  691         unsigned long offset;
  692         unsigned long agp_offset;
  693         int count;
  694         int order;
  695         int size;
  696         int alignment;
  697         int page_order;
  698         int total;
  699         int byte_count;
  700         int i, valid;
  701         struct drm_buf **temp_buflist;
  702 
  703         if (!dma)
  704                 return -EINVAL;
  705 
  706         count = request->count;
  707         order = drm_order(request->size);
  708         size = 1 << order;
  709 
  710         alignment = (request->flags & _DRM_PAGE_ALIGN)
  711             ? PAGE_ALIGN(size) : size;
  712         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
  713         total = PAGE_SIZE << page_order;
  714 
  715         byte_count = 0;
  716         agp_offset = dev->agp->base + request->agp_start;
  717 
  718         DRM_DEBUG("count:      %d\n", count);
  719         DRM_DEBUG("order:      %d\n", order);
  720         DRM_DEBUG("size:       %d\n", size);
  721         DRM_DEBUG("agp_offset: %lx\n", agp_offset);
  722         DRM_DEBUG("alignment:  %d\n", alignment);
  723         DRM_DEBUG("page_order: %d\n", page_order);
  724         DRM_DEBUG("total:      %d\n", total);
  725 
  726         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
  727                 return -EINVAL;
  728 
  729         /* Make sure buffers are located in AGP memory that we own */
  730         valid = 0;
  731         list_for_each_entry(agp_entry, &dev->agp->memory, head) {
  732                 if ((agp_offset >= agp_entry->bound) &&
  733                     (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
  734                         valid = 1;
  735                         break;
  736                 }
  737         }
  738         if (!list_empty(&dev->agp->memory) && !valid) {
  739                 DRM_DEBUG("zone invalid\n");
  740                 return -EINVAL;
  741         }
  742         mtx_lock(&dev->count_lock);
  743         if (dev->buf_use) {
  744                 mtx_unlock(&dev->count_lock);
  745                 return -EBUSY;
  746         }
  747         atomic_inc(&dev->buf_alloc);
  748         mtx_unlock(&dev->count_lock);
  749 
  750         DRM_LOCK(dev);
  751         entry = &dma->bufs[order];
  752         if (entry->buf_count) {
  753                 DRM_UNLOCK(dev);
  754                 atomic_dec(&dev->buf_alloc);
  755                 return -ENOMEM; /* May only call once for each order */
  756         }
  757 
  758         if (count < 0 || count > 4096) {
  759                 DRM_UNLOCK(dev);
  760                 atomic_dec(&dev->buf_alloc);
  761                 return -EINVAL;
  762         }
  763 
  764         entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
  765             M_NOWAIT | M_ZERO);
  766         if (!entry->buflist) {
  767                 DRM_UNLOCK(dev);
  768                 atomic_dec(&dev->buf_alloc);
  769                 return -ENOMEM;
  770         }
  771 
  772         entry->buf_size = size;
  773         entry->page_order = page_order;
  774 
  775         offset = 0;
  776 
  777         while (entry->buf_count < count) {
  778                 buf = &entry->buflist[entry->buf_count];
  779                 buf->idx = dma->buf_count + entry->buf_count;
  780                 buf->total = alignment;
  781                 buf->order = order;
  782                 buf->used = 0;
  783 
  784                 buf->offset = (dma->byte_count + offset);
  785                 buf->bus_address = agp_offset + offset;
  786                 buf->address = (void *)(agp_offset + offset);
  787                 buf->next = NULL;
  788                 buf->waiting = 0;
  789                 buf->pending = 0;
  790                 buf->file_priv = NULL;
  791 
  792                 buf->dev_priv_size = dev->driver->dev_priv_size;
  793                 buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
  794                     M_NOWAIT | M_ZERO);
  795                 if (!buf->dev_private) {
  796                         /* Set count correctly so we free the proper amount. */
  797                         entry->buf_count = count;
  798                         drm_cleanup_buf_error(dev, entry);
  799                         DRM_UNLOCK(dev);
  800                         atomic_dec(&dev->buf_alloc);
  801                         return -ENOMEM;
  802                 }
  803 
  804                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
  805 
  806                 offset += alignment;
  807                 entry->buf_count++;
  808                 byte_count += PAGE_SIZE << page_order;
  809         }
  810 
  811         DRM_DEBUG("byte_count: %d\n", byte_count);
  812 
  813         temp_buflist = realloc(dma->buflist,
  814             (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
  815             DRM_MEM_BUFS, M_NOWAIT);
  816         if (!temp_buflist) {
  817                 /* Free the entry because it isn't valid */
  818                 drm_cleanup_buf_error(dev, entry);
  819                 DRM_UNLOCK(dev);
  820                 atomic_dec(&dev->buf_alloc);
  821                 return -ENOMEM;
  822         }
  823         dma->buflist = temp_buflist;
  824 
  825         for (i = 0; i < entry->buf_count; i++) {
  826                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
  827         }
  828 
  829         dma->buf_count += entry->buf_count;
  830         dma->seg_count += entry->seg_count;
  831         dma->page_count += byte_count >> PAGE_SHIFT;
  832         dma->byte_count += byte_count;
  833 
  834         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
  835         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
  836 
  837         DRM_UNLOCK(dev);
  838 
  839         request->count = entry->buf_count;
  840         request->size = size;
  841 
  842         dma->flags = _DRM_DMA_USE_AGP;
  843 
  844         atomic_dec(&dev->buf_alloc);
  845         return 0;
  846 }
  847 EXPORT_SYMBOL(drm_addbufs_agp);
  848 #endif                          /* __OS_HAS_AGP */
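
/*
 * [Editor's note, not in the original file] The size/order arithmetic
 * shared by all the addbufs variants, with concrete numbers.  For a
 * request of 20 KiB buffers with _DRM_PAGE_ALIGN on a 4 KiB-page machine:
 *
 *	order      = drm_order(20480)  = 15	(smallest 2^n >= 20480)
 *	size       = 1 << 15           = 32768
 *	alignment  = PAGE_ALIGN(32768) = 32768	(already page-aligned)
 *	page_order = 15 - 12           = 3
 *	total      = PAGE_SIZE << 3    = 32768	(bytes per DMA segment)
 *
 * so each buffer occupies exactly one eight-page segment.
 */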
  849 
  850 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
  851 {
  852         struct drm_device_dma *dma = dev->dma;
  853         int count;
  854         int order;
  855         int size;
  856         int total;
  857         int page_order;
  858         struct drm_buf_entry *entry;
  859         drm_dma_handle_t *dmah;
  860         struct drm_buf *buf;
  861         int alignment;
  862         unsigned long offset;
  863         int i;
  864         int byte_count;
  865         int page_count;
  866         unsigned long *temp_pagelist;
  867         struct drm_buf **temp_buflist;
  868 
  869         if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
  870                 return -EINVAL;
  871 
  872         if (!dma)
  873                 return -EINVAL;
  874 
  875         if (!DRM_SUSER(DRM_CURPROC))
  876                 return -EPERM;
  877 
  878         count = request->count;
  879         order = drm_order(request->size);
  880         size = 1 << order;
  881 
  882         DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
  883                   request->count, request->size, size, order);
  884 
  885         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
  886                 return -EINVAL;
  887 
  888         alignment = (request->flags & _DRM_PAGE_ALIGN)
  889             ? PAGE_ALIGN(size) : size;
  890         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
  891         total = PAGE_SIZE << page_order;
  892 
  893         mtx_lock(&dev->count_lock);
  894         if (dev->buf_use) {
  895                 mtx_unlock(&dev->count_lock);
  896                 return -EBUSY;
  897         }
  898         atomic_inc(&dev->buf_alloc);
  899         mtx_unlock(&dev->count_lock);
  900 
  901         DRM_LOCK(dev);
  902         entry = &dma->bufs[order];
  903         if (entry->buf_count) {
  904                 DRM_UNLOCK(dev);
  905                 atomic_dec(&dev->buf_alloc);
  906                 return -ENOMEM; /* May only call once for each order */
  907         }
  908 
  909         if (count < 0 || count > 4096) {
  910                 DRM_UNLOCK(dev);
  911                 atomic_dec(&dev->buf_alloc);
  912                 return -EINVAL;
  913         }
  914 
  915         entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
  916             M_NOWAIT | M_ZERO);
  917         if (!entry->buflist) {
  918                 DRM_UNLOCK(dev);
  919                 atomic_dec(&dev->buf_alloc);
  920                 return -ENOMEM;
  921         }
  922 
  923         entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
  924             M_NOWAIT | M_ZERO);
  925         if (!entry->seglist) {
  926                 free(entry->buflist, DRM_MEM_BUFS);
  927                 DRM_UNLOCK(dev);
  928                 atomic_dec(&dev->buf_alloc);
  929                 return -ENOMEM;
  930         }
  931 
  932         /* Keep the original pagelist until we know all the allocations
  933          * have succeeded
  934          */
  935         temp_pagelist = malloc((dma->page_count + (count << page_order)) *
  936             sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
  937         if (!temp_pagelist) {
  938                 free(entry->buflist, DRM_MEM_BUFS);
  939                 free(entry->seglist, DRM_MEM_SEGS);
  940                 DRM_UNLOCK(dev);
  941                 atomic_dec(&dev->buf_alloc);
  942                 return -ENOMEM;
  943         }
  944         memcpy(temp_pagelist,
  945                dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
  946         DRM_DEBUG("pagelist: %d entries\n",
  947                   dma->page_count + (count << page_order));
  948 
  949         entry->buf_size = size;
  950         entry->page_order = page_order;
  951         byte_count = 0;
  952         page_count = 0;
  953 
  954         while (entry->buf_count < count) {
  955 
  956                 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, BUS_SPACE_MAXADDR);
  957 
  958                 if (!dmah) {
  959                         /* Set count correctly so we free the proper amount. */
  960                         entry->buf_count = count;
  961                         entry->seg_count = count;
  962                         drm_cleanup_buf_error(dev, entry);
  963                         free(temp_pagelist, DRM_MEM_PAGES);
  964                         DRM_UNLOCK(dev);
  965                         atomic_dec(&dev->buf_alloc);
  966                         return -ENOMEM;
  967                 }
  968                 entry->seglist[entry->seg_count++] = dmah;
  969                 for (i = 0; i < (1 << page_order); i++) {
  970                         DRM_DEBUG("page %d @ 0x%08lx\n",
  971                                   dma->page_count + page_count,
  972                                   (unsigned long)dmah->vaddr + PAGE_SIZE * i);
  973                         temp_pagelist[dma->page_count + page_count++]
  974                                 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
  975                 }
  976                 for (offset = 0;
  977                      offset + size <= total && entry->buf_count < count;
  978                      offset += alignment, ++entry->buf_count) {
  979                         buf = &entry->buflist[entry->buf_count];
  980                         buf->idx = dma->buf_count + entry->buf_count;
  981                         buf->total = alignment;
  982                         buf->order = order;
  983                         buf->used = 0;
  984                         buf->offset = (dma->byte_count + byte_count + offset);
  985                         buf->address = (void *)((char *)dmah->vaddr + offset);
  986                         buf->bus_address = dmah->busaddr + offset;
  987                         buf->next = NULL;
  988                         buf->waiting = 0;
  989                         buf->pending = 0;
  990                         buf->file_priv = NULL;
  991 
  992                         buf->dev_priv_size = dev->driver->dev_priv_size;
  993                         buf->dev_private = malloc(buf->dev_priv_size,
  994                             DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
  995                         if (!buf->dev_private) {
  996                                 /* Set count correctly so we free the proper amount. */
  997                                 entry->buf_count = count;
  998                                 entry->seg_count = count;
  999                                 drm_cleanup_buf_error(dev, entry);
 1000                                 free(temp_pagelist, DRM_MEM_PAGES);
 1001                                 DRM_UNLOCK(dev);
 1002                                 atomic_dec(&dev->buf_alloc);
 1003                                 return -ENOMEM;
 1004                         }
 1005 
 1006                         DRM_DEBUG("buffer %d @ %p\n",
 1007                                   entry->buf_count, buf->address);
 1008                 }
 1009                 byte_count += PAGE_SIZE << page_order;
 1010         }
 1011 
 1012         temp_buflist = realloc(dma->buflist,
 1013             (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 1014             DRM_MEM_BUFS, M_NOWAIT);
 1015         if (!temp_buflist) {
 1016                 /* Free the entry because it isn't valid */
 1017                 drm_cleanup_buf_error(dev, entry);
 1018                 free(temp_pagelist, DRM_MEM_PAGES);
 1019                 DRM_UNLOCK(dev);
 1020                 atomic_dec(&dev->buf_alloc);
 1021                 return -ENOMEM;
 1022         }
 1023         dma->buflist = temp_buflist;
 1024 
 1025         for (i = 0; i < entry->buf_count; i++) {
 1026                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 1027         }
 1028 
 1029         /* No allocations failed, so now we can replace the original pagelist
 1030          * with the new one.
 1031          */
 1032         if (dma->page_count) {
 1033                 free(dma->pagelist, DRM_MEM_PAGES);
 1034         }
 1035         dma->pagelist = temp_pagelist;
 1036 
 1037         dma->buf_count += entry->buf_count;
 1038         dma->seg_count += entry->seg_count;
 1039         dma->page_count += entry->seg_count << page_order;
 1040         dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 1041 
 1042         DRM_UNLOCK(dev);
 1043 
 1044         request->count = entry->buf_count;
 1045         request->size = size;
 1046 
 1047         if (request->flags & _DRM_PCI_BUFFER_RO)
 1048                 dma->flags = _DRM_DMA_USE_PCI_RO;
 1049 
 1050         atomic_dec(&dev->buf_alloc);
 1051         return 0;
 1052 
 1053 }
 1054 EXPORT_SYMBOL(drm_addbufs_pci);
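
/*
 * [Editor's note, not in the original file] The pagelist handling above is
 * a commit-or-rollback pattern, reduced here to its essentials; new_count
 * is a hypothetical name for the grown list length.
 */
#if 0
	unsigned long *tmp;

	tmp = malloc(new_count * sizeof(*tmp), DRM_MEM_PAGES, M_NOWAIT);
	if (tmp == NULL)
		return -ENOMEM;			/* old pagelist untouched */
	memcpy(tmp, dma->pagelist, dma->page_count * sizeof(*tmp));
	/* ... append new pages; on any failure free(tmp) and bail ... */
	free(dma->pagelist, DRM_MEM_PAGES);	/* success: swap in */
	dma->pagelist = tmp;
#endif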
 1055 
 1056 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
 1057 {
 1058         struct drm_device_dma *dma = dev->dma;
 1059         struct drm_buf_entry *entry;
 1060         struct drm_buf *buf;
 1061         unsigned long offset;
 1062         unsigned long agp_offset;
 1063         int count;
 1064         int order;
 1065         int size;
 1066         int alignment;
 1067         int page_order;
 1068         int total;
 1069         int byte_count;
 1070         int i;
 1071         struct drm_buf **temp_buflist;
 1072 
 1073         if (!drm_core_check_feature(dev, DRIVER_SG))
 1074                 return -EINVAL;
 1075 
 1076         if (!dma)
 1077                 return -EINVAL;
 1078 
 1079         if (!DRM_SUSER(DRM_CURPROC))
 1080                 return -EPERM;
 1081 
 1082         count = request->count;
 1083         order = drm_order(request->size);
 1084         size = 1 << order;
 1085 
 1086         alignment = (request->flags & _DRM_PAGE_ALIGN)
 1087             ? PAGE_ALIGN(size) : size;
 1088         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 1089         total = PAGE_SIZE << page_order;
 1090 
 1091         byte_count = 0;
 1092         agp_offset = request->agp_start;
 1093 
 1094         DRM_DEBUG("count:      %d\n", count);
 1095         DRM_DEBUG("order:      %d\n", order);
 1096         DRM_DEBUG("size:       %d\n", size);
 1097         DRM_DEBUG("agp_offset: %lu\n", agp_offset);
 1098         DRM_DEBUG("alignment:  %d\n", alignment);
 1099         DRM_DEBUG("page_order: %d\n", page_order);
 1100         DRM_DEBUG("total:      %d\n", total);
 1101 
 1102         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 1103                 return -EINVAL;
 1104 
 1105         mtx_lock(&dev->count_lock);
 1106         if (dev->buf_use) {
 1107                 mtx_unlock(&dev->count_lock);
 1108                 return -EBUSY;
 1109         }
 1110         atomic_inc(&dev->buf_alloc);
 1111         mtx_unlock(&dev->count_lock);
 1112 
 1113         DRM_LOCK(dev);
 1114         entry = &dma->bufs[order];
 1115         if (entry->buf_count) {
 1116                 DRM_UNLOCK(dev);
 1117                 atomic_dec(&dev->buf_alloc);
 1118                 return -ENOMEM; /* May only call once for each order */
 1119         }
 1120 
 1121         if (count < 0 || count > 4096) {
 1122                 DRM_UNLOCK(dev);
 1123                 atomic_dec(&dev->buf_alloc);
 1124                 return -EINVAL;
 1125         }
 1126 
 1127         entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 1128             M_NOWAIT | M_ZERO);
 1129         if (!entry->buflist) {
 1130                 DRM_UNLOCK(dev);
 1131                 atomic_dec(&dev->buf_alloc);
 1132                 return -ENOMEM;
 1133         }
 1134 
 1135         entry->buf_size = size;
 1136         entry->page_order = page_order;
 1137 
 1138         offset = 0;
 1139 
 1140         while (entry->buf_count < count) {
 1141                 buf = &entry->buflist[entry->buf_count];
 1142                 buf->idx = dma->buf_count + entry->buf_count;
 1143                 buf->total = alignment;
 1144                 buf->order = order;
 1145                 buf->used = 0;
 1146 
 1147                 buf->offset = (dma->byte_count + offset);
 1148                 buf->bus_address = agp_offset + offset;
 1149                 buf->address = (void *)(agp_offset + offset
 1150                                         + (unsigned long)dev->sg->vaddr);
 1151                 buf->next = NULL;
 1152                 buf->waiting = 0;
 1153                 buf->pending = 0;
 1154                 buf->file_priv = NULL;
 1155 
 1156                 buf->dev_priv_size = dev->driver->dev_priv_size;
 1157                 buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 1158                     M_NOWAIT | M_ZERO);
 1159                 if (!buf->dev_private) {
 1160                         /* Set count correctly so we free the proper amount. */
 1161                         entry->buf_count = count;
 1162                         drm_cleanup_buf_error(dev, entry);
 1163                         DRM_UNLOCK(dev);
 1164                         atomic_dec(&dev->buf_alloc);
 1165                         return -ENOMEM;
 1166                 }
 1167 
 1168                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 1169 
 1170                 offset += alignment;
 1171                 entry->buf_count++;
 1172                 byte_count += PAGE_SIZE << page_order;
 1173         }
 1174 
 1175         DRM_DEBUG("byte_count: %d\n", byte_count);
 1176 
 1177         temp_buflist = realloc(dma->buflist,
 1178             (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 1179             DRM_MEM_BUFS, M_NOWAIT);
 1180         if (!temp_buflist) {
 1181                 /* Free the entry because it isn't valid */
 1182                 drm_cleanup_buf_error(dev, entry);
 1183                 DRM_UNLOCK(dev);
 1184                 atomic_dec(&dev->buf_alloc);
 1185                 return -ENOMEM;
 1186         }
 1187         dma->buflist = temp_buflist;
 1188 
 1189         for (i = 0; i < entry->buf_count; i++) {
 1190                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 1191         }
 1192 
 1193         dma->buf_count += entry->buf_count;
 1194         dma->seg_count += entry->seg_count;
 1195         dma->page_count += byte_count >> PAGE_SHIFT;
 1196         dma->byte_count += byte_count;
 1197 
 1198         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 1199         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 1200 
 1201         DRM_UNLOCK(dev);
 1202 
 1203         request->count = entry->buf_count;
 1204         request->size = size;
 1205 
 1206         dma->flags = _DRM_DMA_USE_SG;
 1207 
 1208         atomic_dec(&dev->buf_alloc);
 1209         return 0;
 1210 }
 1211 
 1212 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
 1213 {
 1214         struct drm_device_dma *dma = dev->dma;
 1215         struct drm_buf_entry *entry;
 1216         struct drm_buf *buf;
 1217         unsigned long offset;
 1218         unsigned long agp_offset;
 1219         int count;
 1220         int order;
 1221         int size;
 1222         int alignment;
 1223         int page_order;
 1224         int total;
 1225         int byte_count;
 1226         int i;
 1227         struct drm_buf **temp_buflist;
 1228 
 1229         if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
 1230                 return -EINVAL;
 1231 
 1232         if (!dma)
 1233                 return -EINVAL;
 1234 
 1235         if (!DRM_SUSER(DRM_CURPROC))
 1236                 return -EPERM;
 1237 
 1238         count = request->count;
 1239         order = drm_order(request->size);
 1240         size = 1 << order;
 1241 
 1242         alignment = (request->flags & _DRM_PAGE_ALIGN)
 1243             ? PAGE_ALIGN(size) : size;
 1244         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 1245         total = PAGE_SIZE << page_order;
 1246 
 1247         byte_count = 0;
 1248         agp_offset = request->agp_start;
 1249 
 1250         DRM_DEBUG("count:      %d\n", count);
 1251         DRM_DEBUG("order:      %d\n", order);
 1252         DRM_DEBUG("size:       %d\n", size);
 1253         DRM_DEBUG("agp_offset: %lu\n", agp_offset);
 1254         DRM_DEBUG("alignment:  %d\n", alignment);
 1255         DRM_DEBUG("page_order: %d\n", page_order);
 1256         DRM_DEBUG("total:      %d\n", total);
 1257 
 1258         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 1259                 return -EINVAL;
 1260 
 1261         mtx_lock(&dev->count_lock);
 1262         if (dev->buf_use) {
 1263                 mtx_unlock(&dev->count_lock);
 1264                 return -EBUSY;
 1265         }
 1266         atomic_inc(&dev->buf_alloc);
 1267         mtx_unlock(&dev->count_lock);
 1268 
 1269         DRM_LOCK(dev);
 1270         entry = &dma->bufs[order];
 1271         if (entry->buf_count) {
 1272                 DRM_UNLOCK(dev);
 1273                 atomic_dec(&dev->buf_alloc);
 1274                 return -ENOMEM; /* May only call once for each order */
 1275         }
 1276 
 1277         if (count < 0 || count > 4096) {
 1278                 DRM_UNLOCK(dev);
 1279                 atomic_dec(&dev->buf_alloc);
 1280                 return -EINVAL;
 1281         }
 1282 
 1283         entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 1284             M_NOWAIT | M_ZERO);
 1285         if (!entry->buflist) {
 1286                 DRM_UNLOCK(dev);
 1287                 atomic_dec(&dev->buf_alloc);
 1288                 return -ENOMEM;
 1289         }
 1290 
 1291         entry->buf_size = size;
 1292         entry->page_order = page_order;
 1293 
 1294         offset = 0;
 1295 
 1296         while (entry->buf_count < count) {
 1297                 buf = &entry->buflist[entry->buf_count];
 1298                 buf->idx = dma->buf_count + entry->buf_count;
 1299                 buf->total = alignment;
 1300                 buf->order = order;
 1301                 buf->used = 0;
 1302 
 1303                 buf->offset = (dma->byte_count + offset);
 1304                 buf->bus_address = agp_offset + offset;
 1305                 buf->address = (void *)(agp_offset + offset);
 1306                 buf->next = NULL;
 1307                 buf->waiting = 0;
 1308                 buf->pending = 0;
 1309                 buf->file_priv = NULL;
 1310 
 1311                 buf->dev_priv_size = dev->driver->dev_priv_size;
 1312                 buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 1313                     M_NOWAIT | M_ZERO);
 1314                 if (!buf->dev_private) {
 1315                         /* Set count correctly so we free the proper amount. */
 1316                         entry->buf_count = count;
 1317                         drm_cleanup_buf_error(dev, entry);
 1318                         DRM_UNLOCK(dev);
 1319                         atomic_dec(&dev->buf_alloc);
 1320                         return -ENOMEM;
 1321                 }
 1322 
 1323                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 1324 
 1325                 offset += alignment;
 1326                 entry->buf_count++;
 1327                 byte_count += PAGE_SIZE << page_order;
 1328         }
 1329 
 1330         DRM_DEBUG("byte_count: %d\n", byte_count);
 1331 
 1332         temp_buflist = realloc(dma->buflist,
 1333             (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
 1334             DRM_MEM_BUFS, M_NOWAIT);
 1335         if (!temp_buflist) {
 1336                 /* Free the entry because it isn't valid */
 1337                 drm_cleanup_buf_error(dev, entry);
 1338                 DRM_UNLOCK(dev);
 1339                 atomic_dec(&dev->buf_alloc);
 1340                 return -ENOMEM;
 1341         }
 1342         dma->buflist = temp_buflist;
 1343 
 1344         for (i = 0; i < entry->buf_count; i++) {
 1345                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 1346         }
 1347 
 1348         dma->buf_count += entry->buf_count;
 1349         dma->seg_count += entry->seg_count;
 1350         dma->page_count += byte_count >> PAGE_SHIFT;
 1351         dma->byte_count += byte_count;
 1352 
 1353         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 1354         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 1355 
 1356         DRM_UNLOCK(dev);
 1357 
 1358         request->count = entry->buf_count;
 1359         request->size = size;
 1360 
 1361         dma->flags = _DRM_DMA_USE_FB;
 1362 
 1363         atomic_dec(&dev->buf_alloc);
 1364         return 0;
 1365 }
 1366 
 1367 
 1368 /**
 1369  * Add buffers for DMA transfers (ioctl).
 1370  *
 1371  * \param inode device inode.
 1372  * \param file_priv DRM file private.
 1373  * \param cmd command.
 1374  * \param arg pointer to a struct drm_buf_desc request.
 1375  * \return zero on success or a negative number on failure.
 1376  *
 1377  * According to the memory type specified in drm_buf_desc::flags and the
 1378  * build options, it dispatches the call to drm_addbufs_agp(),
 1379  * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
 1380  * scatter-gather, framebuffer or consistent PCI memory respectively.
 1381  */
 1382 int drm_addbufs(struct drm_device *dev, void *data,
 1383                 struct drm_file *file_priv)
 1384 {
 1385         struct drm_buf_desc *request = data;
 1386         int ret;
 1387 
 1388         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 1389                 return -EINVAL;
 1390 
 1391 #if __OS_HAS_AGP
 1392         if (request->flags & _DRM_AGP_BUFFER)
 1393                 ret = drm_addbufs_agp(dev, request);
 1394         else
 1395 #endif
 1396         if (request->flags & _DRM_SG_BUFFER)
 1397                 ret = drm_addbufs_sg(dev, request);
 1398         else if (request->flags & _DRM_FB_BUFFER)
 1399                 ret = drm_addbufs_fb(dev, request);
 1400         else
 1401                 ret = drm_addbufs_pci(dev, request);
 1402 
 1403         return ret;
 1404 }
 1405 
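      /*
       * Illustrative sketch (not part of the original file): how a userspace
       * client might reach the dispatch above.  The open DRM descriptor "fd"
       * is assumed; the ioctl number and structure are the stock DRM ones.
       *
       *      struct drm_buf_desc desc;
       *
       *      memset(&desc, 0, sizeof(desc));
       *      desc.count = 32;                // buffers requested
       *      desc.size  = 65536;             // bytes per buffer
       *      desc.flags = _DRM_SG_BUFFER;    // routed to drm_addbufs_sg()
       *      if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) != 0)
       *              err(1, "DRM_IOCTL_ADD_BUFS");
       *      // On return, desc.count holds the number actually allocated.
       */
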
 1406 /**
 1407  * Get information about the buffer mappings.
 1408  *
 1409  * This was originally meant for debugging purposes, or for use by a
 1410  * sophisticated client library to determine how best to use the available
 1411  * buffers (e.g., large buffers can be used for image transfer).
 1412  *
 1413  * \param inode device inode.
 1414  * \param file_priv DRM file private.
 1415  * \param cmd command.
 1416  * \param arg pointer to a drm_buf_info structure.
 1417  * \return zero on success or a negative number on failure.
 1418  *
 1419  * Increments drm_device::buf_use while holding the drm_device::count_lock
 1420  * lock, preventing allocation of more buffers after this call. Information
 1421  * about each requested buffer is then copied into user space.
 1422  */
 1423 int drm_infobufs(struct drm_device *dev, void *data,
 1424                  struct drm_file *file_priv)
 1425 {
 1426         struct drm_device_dma *dma = dev->dma;
 1427         struct drm_buf_info *request = data;
 1428         int i;
 1429         int count;
 1430 
 1431         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 1432                 return -EINVAL;
 1433 
 1434         if (!dma)
 1435                 return -EINVAL;
 1436 
 1437         mtx_lock(&dev->count_lock);
 1438         if (atomic_read(&dev->buf_alloc)) {
 1439                 mtx_unlock(&dev->count_lock);
 1440                 return -EBUSY;
 1441         }
 1442         ++dev->buf_use;         /* Can't allocate more after this call */
 1443         mtx_unlock(&dev->count_lock);
 1444 
 1445         for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 1446                 if (dma->bufs[i].buf_count)
 1447                         ++count;
 1448         }
 1449 
 1450         DRM_DEBUG("count = %d\n", count);
 1451 
 1452         if (request->count >= count) {
 1453                 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 1454                         if (dma->bufs[i].buf_count) {
 1455                                 struct drm_buf_desc __user *to =
 1456                                     &request->list[count];
 1457                                 struct drm_buf_entry *from = &dma->bufs[i];
 1458                                 struct drm_freelist *list = &dma->bufs[i].freelist;
 1459                                 if (copy_to_user(&to->count,
 1460                                                  &from->buf_count,
 1461                                                  sizeof(from->buf_count)) ||
 1462                                     copy_to_user(&to->size,
 1463                                                  &from->buf_size,
 1464                                                  sizeof(from->buf_size)) ||
 1465                                     copy_to_user(&to->low_mark,
 1466                                                  &list->low_mark,
 1467                                                  sizeof(list->low_mark)) ||
 1468                                     copy_to_user(&to->high_mark,
 1469                                                  &list->high_mark,
 1470                                                  sizeof(list->high_mark)))
 1471                                         return -EFAULT;
 1472 
 1473                                 DRM_DEBUG("%d %d %d %d %d\n",
 1474                                           i,
 1475                                           dma->bufs[i].buf_count,
 1476                                           dma->bufs[i].buf_size,
 1477                                           dma->bufs[i].freelist.low_mark,
 1478                                           dma->bufs[i].freelist.high_mark);
 1479                                 ++count;
 1480                         }
 1481                 }
 1482         }
 1483         request->count = count;
 1484 
 1485         return 0;
 1486 }
 1487 
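      /*
       * Illustrative sketch (not part of the original file): the two-pass
       * pattern a client might use against the ioctl above.  Assumes an open
       * DRM descriptor "fd"; the structures are the stock DRM ones.
       *
       *      struct drm_buf_info info = { 0, NULL };
       *
       *      // Pass 1: a count of 0 satisfies no non-empty pool, so only
       *      // the number of pools comes back in info.count.
       *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
       *      info.list = calloc(info.count, sizeof(*info.list));
       *      // Pass 2: the list is now large enough, so per-order count,
       *      // size and water marks are copied out by the loop above.
       *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
       */
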
 1488 /**
 1489  * Specifies a low and high water mark for buffer allocation.
 1490  *
 1491  * \param inode device inode.
 1492  * \param file_priv DRM file private.
 1493  * \param cmd command.
 1494  * \param arg a pointer to a drm_buf_desc structure.
 1495  * \return zero on success or a negative number on failure.
 1496  *
 1497  * Verifies that the size order lies within the admissible range and updates
 1498  * the low and high water marks of the respective drm_device_dma::bufs entry.
 1499  *
 1500  * \note This ioctl is deprecated and rarely, if ever, used.
 1501  */
 1502 int drm_markbufs(struct drm_device *dev, void *data,
 1503                  struct drm_file *file_priv)
 1504 {
 1505         struct drm_device_dma *dma = dev->dma;
 1506         struct drm_buf_desc *request = data;
 1507         int order;
 1508         struct drm_buf_entry *entry;
 1509 
 1510         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 1511                 return -EINVAL;
 1512 
 1513         if (!dma)
 1514                 return -EINVAL;
 1515 
 1516         DRM_DEBUG("%d, %d, %d\n",
 1517                   request->size, request->low_mark, request->high_mark);
 1518         order = drm_order(request->size);
 1519         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 1520                 return -EINVAL;
 1521         entry = &dma->bufs[order];
 1522 
 1523         if (request->low_mark < 0 || request->low_mark > entry->buf_count)
 1524                 return -EINVAL;
 1525         if (request->high_mark < 0 || request->high_mark > entry->buf_count)
 1526                 return -EINVAL;
 1527 
 1528         entry->freelist.low_mark = request->low_mark;
 1529         entry->freelist.high_mark = request->high_mark;
 1530 
 1531         return 0;
 1532 }
 1533 
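      /*
       * Illustrative sketch (not part of the original file): setting the
       * water marks for the 64 KB pool via the deprecated ioctl above.
       * Assumes an open DRM descriptor "fd".
       *
       *      struct drm_buf_desc mark;
       *
       *      memset(&mark, 0, sizeof(mark));
       *      mark.size      = 65536;  // selects dma->bufs[drm_order(65536)]
       *      mark.low_mark  = 4;      // both marks must not exceed buf_count
       *      mark.high_mark = 24;
       *      ioctl(fd, DRM_IOCTL_MARK_BUFS, &mark);
       */
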
 1534 /**
 1535  * Unreserve the buffers in the list, previously reserved using drmDMA.
 1536  *
 1537  * \param inode device inode.
 1538  * \param file_priv DRM file private.
 1539  * \param cmd command.
 1540  * \param arg pointer to a drm_buf_free structure.
 1541  * \return zero on success or a negative number on failure.
 1542  *
 1543  * Calls free_buffer() for each used buffer.
 1544  * This function is primarily used for debugging.
 1545  */
 1546 int drm_freebufs(struct drm_device *dev, void *data,
 1547                  struct drm_file *file_priv)
 1548 {
 1549         struct drm_device_dma *dma = dev->dma;
 1550         struct drm_buf_free *request = data;
 1551         int i;
 1552         int idx;
 1553         struct drm_buf *buf;
 1554 
 1555         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 1556                 return -EINVAL;
 1557 
 1558         if (!dma)
 1559                 return -EINVAL;
 1560 
 1561         DRM_DEBUG("%d\n", request->count);
 1562         for (i = 0; i < request->count; i++) {
 1563                 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
 1564                         return -EFAULT;
 1565                 if (idx < 0 || idx >= dma->buf_count) {
 1566                         DRM_ERROR("Index %d (of %d max)\n",
 1567                                   idx, dma->buf_count - 1);
 1568                         return -EINVAL;
 1569                 }
 1570                 buf = dma->buflist[idx];
 1571                 if (buf->file_priv != file_priv) {
 1572                         DRM_ERROR("Process %d freeing buffer not owned\n",
 1573                                   DRM_CURRENTPID);
 1574                         return -EINVAL;
 1575                 }
 1576                 drm_free_buffer(dev, buf);
 1577         }
 1578 
 1579         return 0;
 1580 }
 1581 
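      /*
       * Illustrative sketch (not part of the original file): returning two
       * reserved buffers by index.  Assumes an open DRM descriptor "fd" and
       * indices previously handed to this client; as the checks above show,
       * out-of-range indices and buffers owned by others are rejected.
       *
       *      int idx[2] = { 0, 1 };
       *      struct drm_buf_free rel;
       *
       *      rel.count = 2;
       *      rel.list  = idx;
       *      ioctl(fd, DRM_IOCTL_FREE_BUFS, &rel);
       */
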
 1582 /**
 1583  * Maps all of the DMA buffers into client-virtual space (ioctl).
 1584  *
 1585  * \param inode device inode.
 1586  * \param file_priv DRM file private.
 1587  * \param cmd command.
 1588  * \param arg pointer to a drm_buf_map structure.
 1589  * \return zero on success or a negative number on failure.
 1590  *
 1591  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 1592  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 1593  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 1594  * drm_mmap_dma().
 1595  */
 1596 int drm_mapbufs(struct drm_device *dev, void *data,
 1597                 struct drm_file *file_priv)
 1598 {
 1599         struct drm_device_dma *dma = dev->dma;
 1600         int retcode = 0;
 1601         const int zero = 0;
 1602         vm_offset_t virtual;
 1603         vm_offset_t address;
 1604         struct vmspace *vms;
 1605         struct drm_buf_map *request = data;
 1606         int i;
 1607 
 1608         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 1609                 return -EINVAL;
 1610 
 1611         if (!dma)
 1612                 return -EINVAL;
 1613 
 1614         mtx_lock(&dev->count_lock);
 1615         if (atomic_read(&dev->buf_alloc)) {
 1616                 mtx_unlock(&dev->count_lock);
 1617                 return -EBUSY;
 1618         }
 1619         dev->buf_use++;         /* Can't allocate more after this call */
 1620         mtx_unlock(&dev->count_lock);
 1621 
 1622         vms = DRM_CURPROC->td_proc->p_vmspace;
 1623 
 1624         if (request->count >= dma->buf_count) {
 1625                 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
 1626                     || (drm_core_check_feature(dev, DRIVER_SG)
 1627                         && (dma->flags & _DRM_DMA_USE_SG))
 1628                     || (drm_core_check_feature(dev, DRIVER_FB_DMA)
 1629                         && (dma->flags & _DRM_DMA_USE_FB))) {
 1630                         struct drm_local_map *map = dev->agp_buffer_map;
 1631                         vm_ooffset_t token = dev->agp_buffer_token;
 1632 
 1633                         if (!map) {
 1634                                 retcode = -EINVAL;
 1635                                 goto done;
 1636                         }
 1637                         retcode = vm_mmap(&vms->vm_map, &virtual, map->size,
 1638                             VM_PROT_RW, VM_PROT_RW, MAP_SHARED | MAP_NOSYNC,
 1639                             OBJT_DEVICE, file_priv->minor->device, token);
 1640                 } else {
 1641                         retcode = vm_mmap(&vms->vm_map, &virtual, dma->byte_count,
 1642                             VM_PROT_RW, VM_PROT_RW, MAP_SHARED | MAP_NOSYNC,
 1643                             OBJT_DEVICE, file_priv->minor->device, 0);
 1644                 }
 1645                 if (retcode) {
 1646                         /* Real error */
 1647                         retcode = -retcode;
 1648                         goto done;
 1649                 }
 1650                 request->virtual = (void __user *)virtual;
 1651 
 1652                 for (i = 0; i < dma->buf_count; i++) {
 1653                         if (copy_to_user(&request->list[i].idx,
 1654                                          &dma->buflist[i]->idx,
 1655                                          sizeof(request->list[0].idx))) {
 1656                                 retcode = -EFAULT;
 1657                                 goto done;
 1658                         }
 1659                         if (copy_to_user(&request->list[i].total,
 1660                                          &dma->buflist[i]->total,
 1661                                          sizeof(request->list[0].total))) {
 1662                                 retcode = -EFAULT;
 1663                                 goto done;
 1664                         }
 1665                         if (copy_to_user(&request->list[i].used,
 1666                                          &zero, sizeof(zero))) {
 1667                                 retcode = -EFAULT;
 1668                                 goto done;
 1669                         }
 1670                         address = virtual + dma->buflist[i]->offset;    /* user VA of buffer i */
 1671                         if (copy_to_user(&request->list[i].address,
 1672                                          &address, sizeof(address))) {
 1673                                 retcode = -EFAULT;
 1674                                 goto done;
 1675                         }
 1676                 }
 1677         }
 1678       done:
 1679         request->count = dma->buf_count;
 1680         DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
 1681 
 1682         return retcode;
 1683 }
 1684 
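      /*
       * Illustrative sketch (not part of the original file): mapping every
       * buffer in one call.  NBUFS is a hypothetical client-side total, e.g.
       * summed from earlier DRM_IOCTL_ADD_BUFS returns; as the code above
       * shows, nothing is mapped unless request->count >= dma->buf_count.
       *
       *      struct drm_buf_pub list[NBUFS];
       *      struct drm_buf_map map;
       *
       *      memset(&map, 0, sizeof(map));
       *      map.count = NBUFS;
       *      map.list  = list;
       *      if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) == 0) {
       *              // map.virtual spans the whole pool; list[i].address
       *              // points at buffer list[i].idx within that mapping.
       *      }
       */
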
 1685 /**
 1686  * Compute size order.  Returns the exponent of the smallest power of two
 1687  * which is greater than or equal to the given number.
 1688  *
 1689  * \param size size.
 1690  * \return order.
 1691  *
 1692  * \todo Can be made faster.
 1693  */
 1694 int drm_order(unsigned long size)
 1695 {
 1696         int order;
 1697         unsigned long tmp;
 1698 
 1699         for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 1700 
 1701         if (size & (size - 1))
 1702                 ++order;
 1703 
 1704         return order;
 1705 }
 1706 EXPORT_SYMBOL(drm_order);
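
      /*
       * Worked examples for drm_order(); the values follow directly from the
       * loop and the power-of-two test above:
       *
       *      drm_order(1)    == 0    // 2^0 == 1, already a power of two
       *      drm_order(4096) == 12   // 2^12 == 4096, exact
       *      drm_order(4097) == 13   // size & (size - 1) != 0, rounded up
       */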
