FreeBSD/Linux Kernel Cross Reference
sys/lib/btree.c


    1 /*
    2  * lib/btree.c  - Simple In-memory B+Tree
    3  *
    4  * As should be obvious for Linux kernel code, license is GPLv2
    5  *
    6  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
    7  * Bits and pieces stolen from Peter Zijlstra's code, which is
    8  * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
    9  * GPLv2
   10  *
   11  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
   12  *
   13  * A relatively simple B+Tree implementation.  I have written it as a learning
   14  * exercise to understand how B+Trees work.  Turned out to be useful as well.
   15  *
   16  * B+Trees can be used similarly to Linux radix trees (which don't have anything
   17  * in common with textbook radix trees, beware).  The prerequisite for them to
   18  * work well is that access to a random tree node is much slower than a large
   19  * number of operations within each node.
   20  *
   21  * Disks have fulfilled the prerequisite for a long time.  More recently DRAM
   22  * has gained similar properties, as memory access times, when measured in cpu
   23  * cycles, have increased.  Cacheline sizes have increased as well, which also
   24  * helps B+Trees.
   25  *
   26  * Compared to radix trees, B+Trees are more efficient when dealing with a
   27  * sparsely populated address space.  In a B+Tree, between 25% and 50% of the
   28  * memory is occupied by valid pointers.  When densely populated, radix trees
   29  * contain ~98% pointers - hard to beat.  Very sparse radix trees contain only
   30  * ~2% pointers.
   31  *
   32  * This particular implementation stores pointers identified by a key of one
   33  * or more longs.  Storing NULL pointers is illegal; lookup returns NULL when
   34  * no entry is found.
   35  *
   36  * A trick is used here that is not commonly found in textbooks: the lowest
   37  * keys are to the right, not to the left.  All used slots within a node
   38  * are on the left, all unused slots contain NUL (zero) values.  Most
   39  * operations simply loop once over all slots and terminate on the first NUL.
   40  */
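      /*
       * A minimal usage sketch of the API implemented below.  The function
       * btree_example() is illustrative only (it is not part of this file)
       * and error handling is kept to the bare minimum.
       */
      #if 0	/* example only, not compiled */
      static int btree_example(void)
      {
              struct btree_head head;
              unsigned long key = 42;
              static int payload;
              void *val;
              int err;

              err = btree_init(&head);	/* sets up the node mempool */
              if (err)
                      return err;

              err = btree_insert(&head, &btree_geo32, &key, &payload, GFP_KERNEL);
              if (err)
                      goto out;

              val = btree_lookup(&head, &btree_geo32, &key);	/* -> &payload */
              btree_remove(&head, &btree_geo32, &key);	/* returns &payload */
      out:
              btree_destroy(&head);
              return err;
      }
      #endif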
   41 
   42 #include <linux/btree.h>
   43 #include <linux/cache.h>
   44 #include <linux/kernel.h>
   45 #include <linux/slab.h>
   46 #include <linux/module.h>
   47 
   48 #define MAX(a, b) ((a) > (b) ? (a) : (b))
   49 #define NODESIZE MAX(L1_CACHE_BYTES, 128)
   50 
   51 struct btree_geo {
   52         int keylen;
   53         int no_pairs;
   54         int no_longs;
   55 };
   56 
   57 struct btree_geo btree_geo32 = {
   58         .keylen = 1,
   59         .no_pairs = NODESIZE / sizeof(long) / 2,
   60         .no_longs = NODESIZE / sizeof(long) / 2,
   61 };
   62 EXPORT_SYMBOL_GPL(btree_geo32);
   63 
   64 #define LONG_PER_U64 (64 / BITS_PER_LONG)
   65 struct btree_geo btree_geo64 = {
   66         .keylen = LONG_PER_U64,
   67         .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
   68         .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
   69 };
   70 EXPORT_SYMBOL_GPL(btree_geo64);
   71 
   72 struct btree_geo btree_geo128 = {
   73         .keylen = 2 * LONG_PER_U64,
   74         .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
   75         .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
   76 };
   77 EXPORT_SYMBOL_GPL(btree_geo128);
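      /*
       * Worked example, assuming BITS_PER_LONG == 64 and L1_CACHE_BYTES <= 128,
       * i.e. NODESIZE == 128 bytes == 16 longs per node:
       *
       *   btree_geo32:  keylen = 1, no_pairs = 8, no_longs = 8
       *                 (8 key longs + 8 value pointers = 128 bytes)
       *   btree_geo64:  keylen = 1, no_pairs = 8, no_longs = 8
       *   btree_geo128: keylen = 2, no_pairs = 5, no_longs = 10
       *                 (10 key longs + 5 value pointers = 120 of 128 bytes)
       */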
   78 
   79 static struct kmem_cache *btree_cachep;
   80 
   81 void *btree_alloc(gfp_t gfp_mask, void *pool_data)
   82 {
   83         return kmem_cache_alloc(btree_cachep, gfp_mask);
   84 }
   85 EXPORT_SYMBOL_GPL(btree_alloc);
   86 
   87 void btree_free(void *element, void *pool_data)
   88 {
   89         kmem_cache_free(btree_cachep, element);
   90 }
   91 EXPORT_SYMBOL_GPL(btree_free);
   92 
   93 static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
   94 {
   95         unsigned long *node;
   96 
   97         node = mempool_alloc(head->mempool, gfp);
   98         if (likely(node))
   99                 memset(node, 0, NODESIZE);
  100         return node;
  101 }
  102 
  103 static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
  104 {
  105         size_t i;
  106 
  107         for (i = 0; i < n; i++) {
  108                 if (l1[i] < l2[i])
  109                         return -1;
  110                 if (l1[i] > l2[i])
  111                         return 1;
  112         }
  113         return 0;
  114 }
  115 
  116 static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
  117                 size_t n)
  118 {
  119         size_t i;
  120 
  121         for (i = 0; i < n; i++)
  122                 dest[i] = src[i];
  123         return dest;
  124 }
  125 
  126 static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
  127 {
  128         size_t i;
  129 
  130         for (i = 0; i < n; i++)
  131                 s[i] = c;
  132         return s;
  133 }
  134 
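      /*
       * Decrement a multi-long key by one.  key[0] is the most significant
       * long, so the borrow propagates from key[keylen - 1] upwards.
       */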
  135 static void dec_key(struct btree_geo *geo, unsigned long *key)
  136 {
  137         unsigned long val;
  138         int i;
  139 
  140         for (i = geo->keylen - 1; i >= 0; i--) {
  141                 val = key[i];
  142                 key[i] = val - 1;
  143                 if (val)
  144                         break;
  145         }
  146 }
  147 
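      /*
       * Node layout: the first geo->no_longs longs hold the keys (geo->keylen
       * longs per key, slot 0 holding the largest key), followed by
       * geo->no_pairs value longs (user pointers in leaves, child node
       * pointers in internal nodes).
       */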
  148 static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
  149 {
  150         return &node[n * geo->keylen];
  151 }
  152 
  153 static void *bval(struct btree_geo *geo, unsigned long *node, int n)
  154 {
  155         return (void *)node[geo->no_longs + n];
  156 }
  157 
  158 static void setkey(struct btree_geo *geo, unsigned long *node, int n,
  159                    unsigned long *key)
  160 {
  161         longcpy(bkey(geo, node, n), key, geo->keylen);
  162 }
  163 
  164 static void setval(struct btree_geo *geo, unsigned long *node, int n,
  165                    void *val)
  166 {
  167         node[geo->no_longs + n] = (unsigned long) val;
  168 }
  169 
  170 static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
  171 {
  172         longset(bkey(geo, node, n), 0, geo->keylen);
  173         node[geo->no_longs + n] = 0;
  174 }
  175 
  176 static inline void __btree_init(struct btree_head *head)
  177 {
  178         head->node = NULL;
  179         head->height = 0;
  180 }
  181 
  182 void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
  183 {
  184         __btree_init(head);
  185         head->mempool = mempool;
  186 }
  187 EXPORT_SYMBOL_GPL(btree_init_mempool);
  188 
  189 int btree_init(struct btree_head *head)
  190 {
  191         __btree_init(head);
  192         head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
  193         if (!head->mempool)
  194                 return -ENOMEM;
  195         return 0;
  196 }
  197 EXPORT_SYMBOL_GPL(btree_init);
  198 
  199 void btree_destroy(struct btree_head *head)
  200 {
  201         mempool_destroy(head->mempool);
  202         head->mempool = NULL;
  203 }
  204 EXPORT_SYMBOL_GPL(btree_destroy);
  205 
  206 void *btree_last(struct btree_head *head, struct btree_geo *geo,
  207                  unsigned long *key)
  208 {
  209         int height = head->height;
  210         unsigned long *node = head->node;
  211 
  212         if (height == 0)
  213                 return NULL;
  214 
  215         for ( ; height > 1; height--)
  216                 node = bval(geo, node, 0);
  217 
  218         longcpy(key, bkey(geo, node, 0), geo->keylen);
  219         return bval(geo, node, 0);
  220 }
  221 EXPORT_SYMBOL_GPL(btree_last);
  222 
  223 static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
  224                   unsigned long *key)
  225 {
  226         return longcmp(bkey(geo, node, pos), key, geo->keylen);
  227 }
  228 
  229 static int keyzero(struct btree_geo *geo, unsigned long *key)
  230 {
  231         int i;
  232 
  233         for (i = 0; i < geo->keylen; i++)
  234                 if (key[i])
  235                         return 0;
  236 
  237         return 1;
  238 }
  239 
  240 void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
  241                 unsigned long *key)
  242 {
  243         int i, height = head->height;
  244         unsigned long *node = head->node;
  245 
  246         if (height == 0)
  247                 return NULL;
  248 
  249         for ( ; height > 1; height--) {
  250                 for (i = 0; i < geo->no_pairs; i++)
  251                         if (keycmp(geo, node, i, key) <= 0)
  252                                 break;
  253                 if (i == geo->no_pairs)
  254                         return NULL;
  255                 node = bval(geo, node, i);
  256                 if (!node)
  257                         return NULL;
  258         }
  259 
  260         if (!node)
  261                 return NULL;
  262 
  263         for (i = 0; i < geo->no_pairs; i++)
  264                 if (keycmp(geo, node, i, key) == 0)
  265                         return bval(geo, node, i);
  266         return NULL;
  267 }
  268 EXPORT_SYMBOL_GPL(btree_lookup);
  269 
  270 int btree_update(struct btree_head *head, struct btree_geo *geo,
  271                  unsigned long *key, void *val)
  272 {
  273         int i, height = head->height;
  274         unsigned long *node = head->node;
  275 
  276         if (height == 0)
  277                 return -ENOENT;
  278 
  279         for ( ; height > 1; height--) {
  280                 for (i = 0; i < geo->no_pairs; i++)
  281                         if (keycmp(geo, node, i, key) <= 0)
  282                                 break;
  283                 if (i == geo->no_pairs)
  284                         return -ENOENT;
  285                 node = bval(geo, node, i);
  286                 if (!node)
  287                         return -ENOENT;
  288         }
  289 
  290         if (!node)
  291                 return -ENOENT;
  292 
  293         for (i = 0; i < geo->no_pairs; i++)
  294                 if (keycmp(geo, node, i, key) == 0) {
  295                         setval(geo, node, i, val);
  296                         return 0;
  297                 }
  298         return -ENOENT;
  299 }
  300 EXPORT_SYMBOL_GPL(btree_update);
  301 
  302 /*
  303  * Usually this function is quite similar to normal lookup.  But the key of
  304  * a parent node may be smaller than the smallest key of all its siblings.
  305  * In such a case we cannot just return NULL, as we have only proven that no
  306  * key smaller than __key, but larger than this parent key exists.
  307  * So we set __key to the parent key and retry.  We have to use the smallest
  308  * such parent key, which is the last parent key we encountered.
  309  */
  310 void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
  311                      unsigned long *__key)
  312 {
  313         int i, height;
  314         unsigned long *node, *oldnode;
  315         unsigned long *retry_key = NULL, key[geo->keylen];
  316 
  317         if (keyzero(geo, __key))
  318                 return NULL;
  319 
  320         if (head->height == 0)
  321                 return NULL;
  322         longcpy(key, __key, geo->keylen);
  323 retry:
  324         dec_key(geo, key);
  325 
  326         node = head->node;
  327         for (height = head->height ; height > 1; height--) {
  328                 for (i = 0; i < geo->no_pairs; i++)
  329                         if (keycmp(geo, node, i, key) <= 0)
  330                                 break;
  331                 if (i == geo->no_pairs)
  332                         goto miss;
  333                 oldnode = node;
  334                 node = bval(geo, node, i);
  335                 if (!node)
  336                         goto miss;
  337                 retry_key = bkey(geo, oldnode, i);
  338         }
  339 
  340         if (!node)
  341                 goto miss;
  342 
  343         for (i = 0; i < geo->no_pairs; i++) {
  344                 if (keycmp(geo, node, i, key) <= 0) {
  345                         if (bval(geo, node, i)) {
  346                                 longcpy(__key, bkey(geo, node, i), geo->keylen);
  347                                 return bval(geo, node, i);
  348                         } else
  349                                 goto miss;
  350                 }
  351         }
  352 miss:
  353         if (retry_key) {
  354                 longcpy(key, retry_key, geo->keylen);
  355                 retry_key = NULL;
  356                 goto retry;
  357         }
  358         return NULL;
  359 }
  360 EXPORT_SYMBOL_GPL(btree_get_prev);
  361 
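      /*
       * Return the index of the first slot whose key is <= @key.  Keys are
       * sorted in descending order and unused slots compare as zero.
       */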
  362 static int getpos(struct btree_geo *geo, unsigned long *node,
  363                 unsigned long *key)
  364 {
  365         int i;
  366 
  367         for (i = 0; i < geo->no_pairs; i++) {
  368                 if (keycmp(geo, node, i, key) <= 0)
  369                         break;
  370         }
  371         return i;
  372 }
  373 
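      /*
       * Return the index of the first unused slot (NULL value) at or after
       * @start, i.e. the node's fill level.
       */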
  374 static int getfill(struct btree_geo *geo, unsigned long *node, int start)
  375 {
  376         int i;
  377 
  378         for (i = start; i < geo->no_pairs; i++)
  379                 if (!bval(geo, node, i))
  380                         break;
  381         return i;
  382 }
  383 
  384 /*
  385  * Locate the node at @level on the path towards @key (level 1 == leaf level)
  386  */
  387 static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
  388                 unsigned long *key, int level)
  389 {
  390         unsigned long *node = head->node;
  391         int i, height;
  392 
  393         for (height = head->height; height > level; height--) {
  394                 for (i = 0; i < geo->no_pairs; i++)
  395                         if (keycmp(geo, node, i, key) <= 0)
  396                                 break;
  397 
  398                 if ((i == geo->no_pairs) || !bval(geo, node, i)) {
  399                         /* right-most key is too large, update it */
  400                         /* FIXME: If the right-most key on higher levels is
  401                          * always zero, this wouldn't be necessary. */
  402                         i--;
  403                         setkey(geo, node, i, key);
  404                 }
  405                 BUG_ON(i < 0);
  406                 node = bval(geo, node, i);
  407         }
  408         BUG_ON(!node);
  409         return node;
  410 }
  411 
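      /*
       * Grow the tree by one level: allocate a new root whose single child is
       * the old root, keyed by the old root's smallest key.
       */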
  412 static int btree_grow(struct btree_head *head, struct btree_geo *geo,
  413                       gfp_t gfp)
  414 {
  415         unsigned long *node;
  416         int fill;
  417 
  418         node = btree_node_alloc(head, gfp);
  419         if (!node)
  420                 return -ENOMEM;
  421         if (head->node) {
  422                 fill = getfill(geo, head->node, 0);
  423                 setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
  424                 setval(geo, node, 0, head->node);
  425         }
  426         head->node = node;
  427         head->height++;
  428         return 0;
  429 }
  430 
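      /*
       * Shrink the tree by one level once the root is down to a single child:
       * that child becomes the new root and the old root is freed.
       */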
  431 static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
  432 {
  433         unsigned long *node;
  434         int fill;
  435 
  436         if (head->height <= 1)
  437                 return;
  438 
  439         node = head->node;
  440         fill = getfill(geo, node, 0);
  441         BUG_ON(fill > 1);
  442         head->node = bval(geo, node, 0);
  443         head->height--;
  444         mempool_free(node, head->mempool);
  445 }
  446 
  447 static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
  448                               unsigned long *key, void *val, int level,
  449                               gfp_t gfp)
  450 {
  451         unsigned long *node;
  452         int i, pos, fill, err;
  453 
  454         BUG_ON(!val);
  455         if (head->height < level) {
  456                 err = btree_grow(head, geo, gfp);
  457                 if (err)
  458                         return err;
  459         }
  460 
  461 retry:
  462         node = find_level(head, geo, key, level);
  463         pos = getpos(geo, node, key);
  464         fill = getfill(geo, node, pos);
  465         /* two identical keys are not allowed */
  466         BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
  467 
  468         if (fill == geo->no_pairs) {
  469                 /* need to split node */
  470                 unsigned long *new;
  471 
  472                 new = btree_node_alloc(head, gfp);
  473                 if (!new)
  474                         return -ENOMEM;
  475                 err = btree_insert_level(head, geo,
  476                                 bkey(geo, node, fill / 2 - 1),
  477                                 new, level + 1, gfp);
  478                 if (err) {
  479                         mempool_free(new, head->mempool);
  480                         return err;
  481                 }
  482                 for (i = 0; i < fill / 2; i++) {
  483                         setkey(geo, new, i, bkey(geo, node, i));
  484                         setval(geo, new, i, bval(geo, node, i));
  485                         setkey(geo, node, i, bkey(geo, node, i + fill / 2));
  486                         setval(geo, node, i, bval(geo, node, i + fill / 2));
  487                         clearpair(geo, node, i + fill / 2);
  488                 }
  489                 if (fill & 1) {
  490                         setkey(geo, node, i, bkey(geo, node, fill - 1));
  491                         setval(geo, node, i, bval(geo, node, fill - 1));
  492                         clearpair(geo, node, fill - 1);
  493                 }
  494                 goto retry;
  495         }
  496         BUG_ON(fill >= geo->no_pairs);
  497 
  498         /* shift and insert */
  499         for (i = fill; i > pos; i--) {
  500                 setkey(geo, node, i, bkey(geo, node, i - 1));
  501                 setval(geo, node, i, bval(geo, node, i - 1));
  502         }
  503         setkey(geo, node, pos, key);
  504         setval(geo, node, pos, val);
  505 
  506         return 0;
  507 }
  508 
  509 int btree_insert(struct btree_head *head, struct btree_geo *geo,
  510                 unsigned long *key, void *val, gfp_t gfp)
  511 {
  512         BUG_ON(!val);
  513         return btree_insert_level(head, geo, key, val, 1, gfp);
  514 }
  515 EXPORT_SYMBOL_GPL(btree_insert);
  516 
  517 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
  518                 unsigned long *key, int level);
  519 static void merge(struct btree_head *head, struct btree_geo *geo, int level,
  520                 unsigned long *left, int lfill,
  521                 unsigned long *right, int rfill,
  522                 unsigned long *parent, int lpos)
  523 {
  524         int i;
  525 
  526         for (i = 0; i < rfill; i++) {
  527                 /* Move all keys to the left */
  528                 setkey(geo, left, lfill + i, bkey(geo, right, i));
  529                 setval(geo, left, lfill + i, bval(geo, right, i));
  530         }
  531         /* Exchange left and right child in parent */
  532         setval(geo, parent, lpos, right);
  533         setval(geo, parent, lpos + 1, left);
  534         /* Remove left (formerly right) child from parent */
  535         btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
  536         mempool_free(right, head->mempool);
  537 }
  538 
  539 static void rebalance(struct btree_head *head, struct btree_geo *geo,
  540                 unsigned long *key, int level, unsigned long *child, int fill)
  541 {
  542         unsigned long *parent, *left = NULL, *right = NULL;
  543         int i, no_left, no_right;
  544 
  545         if (fill == 0) {
  546                 /* Because we don't steal entries from a neighbour, this case
  547                  * can happen: the parent contains only a single child (this
  548                  * node), so there is no sibling to merge with.
  549                  */
  550                 btree_remove_level(head, geo, key, level + 1);
  551                 mempool_free(child, head->mempool);
  552                 return;
  553         }
  554 
  555         parent = find_level(head, geo, key, level + 1);
  556         i = getpos(geo, parent, key);
  557         BUG_ON(bval(geo, parent, i) != child);
  558 
  559         if (i > 0) {
  560                 left = bval(geo, parent, i - 1);
  561                 no_left = getfill(geo, left, 0);
  562                 if (fill + no_left <= geo->no_pairs) {
  563                         merge(head, geo, level,
  564                                         left, no_left,
  565                                         child, fill,
  566                                         parent, i - 1);
  567                         return;
  568                 }
  569         }
  570         if (i + 1 < getfill(geo, parent, i)) {
  571                 right = bval(geo, parent, i + 1);
  572                 no_right = getfill(geo, right, 0);
  573                 if (fill + no_right <= geo->no_pairs) {
  574                         merge(head, geo, level,
  575                                         child, fill,
  576                                         right, no_right,
  577                                         parent, i);
  578                         return;
  579                 }
  580         }
  581         /*
  582          * We could also try to steal one entry from the left or right
  583          * neighbor.  By not doing so we changed the invariant from
  584          * "all nodes are at least half full" to "no two neighboring
  585          * nodes can be merged", which still means that the average
  586          * fill of all nodes is half or better.
  587          */
  588 }
  589 
  590 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
  591                 unsigned long *key, int level)
  592 {
  593         unsigned long *node;
  594         int i, pos, fill;
  595         void *ret;
  596 
  597         if (level > head->height) {
  598                 /* we recursed all the way up */
  599                 head->height = 0;
  600                 head->node = NULL;
  601                 return NULL;
  602         }
  603 
  604         node = find_level(head, geo, key, level);
  605         pos = getpos(geo, node, key);
  606         fill = getfill(geo, node, pos);
  607         if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
  608                 return NULL;
  609         ret = bval(geo, node, pos);
  610 
  611         /* remove and shift */
  612         for (i = pos; i < fill - 1; i++) {
  613                 setkey(geo, node, i, bkey(geo, node, i + 1));
  614                 setval(geo, node, i, bval(geo, node, i + 1));
  615         }
  616         clearpair(geo, node, fill - 1);
  617 
  618         if (fill - 1 < geo->no_pairs / 2) {
  619                 if (level < head->height)
  620                         rebalance(head, geo, key, level, node, fill - 1);
  621                 else if (fill - 1 == 1)
  622                         btree_shrink(head, geo);
  623         }
  624 
  625         return ret;
  626 }
  627 
  628 void *btree_remove(struct btree_head *head, struct btree_geo *geo,
  629                 unsigned long *key)
  630 {
  631         if (head->height == 0)
  632                 return NULL;
  633 
  634         return btree_remove_level(head, geo, key, 1);
  635 }
  636 EXPORT_SYMBOL_GPL(btree_remove);
  637 
  638 int btree_merge(struct btree_head *target, struct btree_head *victim,
  639                 struct btree_geo *geo, gfp_t gfp)
  640 {
  641         unsigned long key[geo->keylen];
  642         unsigned long dup[geo->keylen];
  643         void *val;
  644         int err;
  645 
  646         BUG_ON(target == victim);
  647 
  648         if (!(target->node)) {
  649                 /* target is empty, just copy fields over */
  650                 target->node = victim->node;
  651                 target->height = victim->height;
  652                 __btree_init(victim);
  653                 return 0;
  654         }
  655 
  656         /* TODO: This needs some optimizations.  Currently we do three tree
  657          * walks to remove a single object from the victim.
  658          */
  659         for (;;) {
  660                 if (!btree_last(victim, geo, key))
  661                         break;
  662                 val = btree_lookup(victim, geo, key);
  663                 err = btree_insert(target, geo, key, val, gfp);
  664                 if (err)
  665                         return err;
  666                 /* We must make a copy of the key, as the original will get
  667                  * mangled inside btree_remove. */
  668                 longcpy(dup, key, geo->keylen);
  669                 btree_remove(victim, geo, dup);
  670         }
  671         return 0;
  672 }
  673 EXPORT_SYMBOL_GPL(btree_merge);
  674 
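      /*
       * Recursively walk the subtree at @node, calling @func on every value
       * stored at the leaf level; if @reap is set, each visited node is freed
       * once its children have been handled.
       */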
  675 static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
  676                                unsigned long *node, unsigned long opaque,
  677                                void (*func)(void *elem, unsigned long opaque,
  678                                             unsigned long *key, size_t index,
  679                                             void *func2),
  680                                void *func2, int reap, int height, size_t count)
  681 {
  682         int i;
  683         unsigned long *child;
  684 
  685         for (i = 0; i < geo->no_pairs; i++) {
  686                 child = bval(geo, node, i);
  687                 if (!child)
  688                         break;
  689                 if (height > 1)
  690                         count = __btree_for_each(head, geo, child, opaque,
  691                                         func, func2, reap, height - 1, count);
  692                 else
  693                         func(child, opaque, bkey(geo, node, i), count++,
  694                                         func2);
  695         }
  696         if (reap)
  697                 mempool_free(node, head->mempool);
  698         return count;
  699 }
  700 
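      /* No-op per-element callback, used when the caller passes a NULL func2. */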
  701 static void empty(void *elem, unsigned long opaque, unsigned long *key,
  702                   size_t index, void *func2)
  703 {
  704 }
  705 
  706 void visitorl(void *elem, unsigned long opaque, unsigned long *key,
  707               size_t index, void *__func)
  708 {
  709         visitorl_t func = __func;
  710 
  711         func(elem, opaque, *key, index);
  712 }
  713 EXPORT_SYMBOL_GPL(visitorl);
  714 
  715 void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
  716                size_t index, void *__func)
  717 {
  718         visitor32_t func = __func;
  719         u32 *key = (void *)__key;
  720 
  721         func(elem, opaque, *key, index);
  722 }
  723 EXPORT_SYMBOL_GPL(visitor32);
  724 
  725 void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
  726                size_t index, void *__func)
  727 {
  728         visitor64_t func = __func;
  729         u64 *key = (void *)__key;
  730 
  731         func(elem, opaque, *key, index);
  732 }
  733 EXPORT_SYMBOL_GPL(visitor64);
  734 
  735 void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
  736                 size_t index, void *__func)
  737 {
  738         visitor128_t func = __func;
  739         u64 *key = (void *)__key;
  740 
  741         func(elem, opaque, key[0], key[1], index);
  742 }
  743 EXPORT_SYMBOL_GPL(visitor128);
  744 
  745 size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
  746                      unsigned long opaque,
  747                      void (*func)(void *elem, unsigned long opaque,
  748                                   unsigned long *key,
  749                                   size_t index, void *func2),
  750                      void *func2)
  751 {
  752         size_t count = 0;
  753 
  754         if (!func2)
  755                 func = empty;
  756         if (head->node)
  757                 count = __btree_for_each(head, geo, head->node, opaque, func,
  758                                 func2, 0, head->height, 0);
  759         return count;
  760 }
  761 EXPORT_SYMBOL_GPL(btree_visitor);
  762 
  763 size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
  764                           unsigned long opaque,
  765                           void (*func)(void *elem, unsigned long opaque,
  766                                        unsigned long *key,
  767                                        size_t index, void *func2),
  768                           void *func2)
  769 {
  770         size_t count = 0;
  771 
  772         if (!func2)
  773                 func = empty;
  774         if (head->node)
  775                 count = __btree_for_each(head, geo, head->node, opaque, func,
  776                                 func2, 1, head->height, 0);
  777         __btree_init(head);
  778         return count;
  779 }
  780 EXPORT_SYMBOL_GPL(btree_grim_visitor);
  781 
  782 static int __init btree_module_init(void)
  783 {
  784         btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
  785                         SLAB_HWCACHE_ALIGN, NULL);
  786         return 0;
  787 }
  788 
  789 static void __exit btree_module_exit(void)
  790 {
  791         kmem_cache_destroy(btree_cachep);
  792 }
  793 
  794 /* If core code starts using btree, initialization should happen even earlier */
  795 module_init(btree_module_init);
  796 module_exit(btree_module_exit);
  797 
  798 MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
  799 MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
  800 MODULE_LICENSE("GPL");
