
FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/drm_mm.c


/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
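
/*
 * Usage overview (illustrative sketch, not part of the original source;
 * the managed range and the error handling below are hypothetical): a
 * driver embeds a struct drm_mm in its device state, initializes it over
 * the address range it manages, inserts nodes to claim space and removes
 * them to free it, then tears the manager down once no nodes remain.
 *
 *      struct drm_mm mm;
 *      struct drm_mm_node node;
 *
 *      drm_mm_init(&mm, 0, 1 << 20);           // manage [0, 1 MB)
 *      memset(&node, 0, sizeof(node));         // preallocated nodes must be cleared
 *      if (drm_mm_insert_node(&mm, &node, 4096, 0) == 0) {
 *              // use [node.start, node.start + node.size) ...
 *              drm_mm_remove_node(&node);
 *      }
 *      drm_mm_takedown(&mm);
 */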

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_mm.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT | M_ZERO);

        if (unlikely(child == NULL)) {
                mtx_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                mtx_unlock(&mm->unused_lock);
        }
        return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * mm:          memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        mtx_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                mtx_unlock(&mm->unused_lock);
                node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
                mtx_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        mtx_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        mtx_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
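
/*
 * Example (sketch; the hole node and calling context are hypothetical):
 * the unused-node cache filled by drm_mm_pre_get() is what lets
 * drm_mm_kmalloc() succeed in an atomic context when malloc(M_NOWAIT)
 * fails, so callers stock the cache before entering such a context.
 *
 *      if (drm_mm_pre_get(mm))
 *              return -ENOMEM;         // could not stock the cache
 *      // ... atomic context ...
 *      node = drm_mm_get_block_generic(hole_node, size, 0, 0, 1);
 */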

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
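
/*
 * Example (sketch; struct my_buf and its fields are hypothetical): the
 * insert-node interface expects a struct drm_mm_node embedded in a driver
 * object, avoiding the separate node allocation of the drm_mm_get_block
 * family.  The embedded node must be zeroed before its first insertion.
 *
 *      struct my_buf {
 *              struct drm_mm_node vma;         // zeroed at object creation
 *              unsigned long size;
 *      };
 *
 *      ret = drm_mm_insert_node(mm, &buf->vma, buf->size, PAGE_SIZE);
 *      if (ret == -ENOSPC)
 *              return ret;             // or evict other nodes and retry
 */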

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
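
/*
 * Example (sketch; mappable_end is a hypothetical limit): range-restricted
 * insertion confines an allocation to a window of the managed area, e.g.
 * keeping a buffer inside the CPU-mappable part of a GPU aperture.
 *
 *      ret = drm_mm_insert_node_in_range(mm, &buf->vma, buf->size,
 *                                        PAGE_SIZE, 0, mappable_end);
 */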

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        mtx_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                free(node, DRM_MEM_MM);
        mtx_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}
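
/*
 * Worked example of the alignment check above (hypothetical numbers): for
 * start = 0x1003, alignment = 0x100, tmp = 0x1003 % 0x100 = 0x3, so start
 * is rounded up by 0x100 - 0x3 = 0xfd to 0x1100.  The hole qualifies only
 * if end >= 0x1100 + size, i.e. the request still fits after rounding.
 */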

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
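
/*
 * Example (sketch; old_buf/new_buf are hypothetical): drm_mm_replace_node()
 * hands an allocated range from one embedded node to another without a
 * remove/insert cycle, e.g. when the object owning the node is reallocated.
 *
 *      drm_mm_replace_node(&old_buf->vma, &new_buf->vma);
 *      // new_buf->vma now owns the range; old_buf->vma reads as unallocated
 */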

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact reverse order from the scan list as
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
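
/*
 * Example (sketch of the eviction roster this API implies; the lru list,
 * scan_list, evict_link and evict() are hypothetical): add blocks to the
 * scan until a hole is found, then remove them in reverse order of
 * addition (prepending to scan_list makes the forward walk below visit
 * them in reverse) and evict only the blocks the remove step flags.
 *
 *      drm_mm_init_scan(mm, size, alignment, color);
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              list_add(&obj->evict_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->vma))
 *                      break;          // hole found
 *      }
 *      list_for_each_entry_safe(obj, next, &scan_list, evict_link) {
 *              if (drm_mm_scan_remove_block(&obj->vma))
 *                      evict(obj);     // its range lies in the found hole
 *      }
 */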

int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);
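
/*
 * Note on the "clever trick" above (explanatory, not from the original
 * source): head_node is a sentinel whose start sits at the end of the
 * managed range and whose size wraps back around to its beginning, so
 * drm_mm_hole_node_start(&mm->head_node) = (start + size) +
 * (start - (start + size)) = start.  The empty manager is thus one big
 * hole following the sentinel, and the circular node list needs no
 * special case for the first or last allocation.
 */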

void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        mtx_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                free(entry, DRM_MEM_MM);
                --mm->num_unused;
        }
        mtx_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
