FreeBSD/Linux Kernel Cross Reference
sys/libkern/jenkins_hash.c


/*
 * Taken from http://burtleburtle.net/bob/c/lookup3.c
 * $FreeBSD: releng/10.1/sys/libkern/jenkins_hash.c 240521 2012-09-14 22:00:03Z eadler $
 */

#include <sys/hash.h>
#include <machine/endian.h>

/*
-------------------------------------------------------------------------------
lookup3.c, by Bob Jenkins, May 2006, Public Domain.

These are functions for producing 32-bit hashes for hash table lookup.
hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
are externally useful functions.  Routines to test the hash are included
if SELF_TEST is defined.  You can use this free for any purpose.  It's in
the public domain.  It has no warranty.

You probably want to use hashlittle().  hashlittle() and hashbig()
hash byte arrays.  hashlittle() is faster than hashbig() on
little-endian machines.  Intel and AMD are little-endian machines.
On second thought, you probably want hashlittle2(), which is identical to
hashlittle() except it returns two 32-bit hashes for the price of one.
You could implement hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value.  If you have a variable length array of
4-byte integers to hash, use hashword().  If you have a byte array (like
a character string), use hashlittle().  If you have several byte arrays, or
a mix of things, see the comments above hashlittle().

Why is this so big?  I read 12 bytes at a time into 3 4-byte integers,
then mix those integers.  This is fast (you can do a lot more thorough
mixing with 12*3 instructions on 3 integers than you can with 3 instructions
on 1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/

#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta.  I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche.  There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a.  The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism.  Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism.  I did what I could.  Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}

/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different.  This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}
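
/*
 * Illustrative sketch, not part of the original file: the "exactly 7
 * integers" recipe from the header comment, written out with the mix()
 * and final() macros defined above.  The JENKINS_HASH_EXAMPLES guard
 * macro is hypothetical and only keeps this example out of real builds.
 */
#ifdef JENKINS_HASH_EXAMPLES
static uint32_t
jenkins_hash_example_7ints(uint32_t i1, uint32_t i2, uint32_t i3,
    uint32_t i4, uint32_t i5, uint32_t i6, uint32_t i7)
{
  uint32_t a, b, c;

  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
  return c;                                       /* c is the hash value */
}
#endif /* JENKINS_HASH_EXAMPLES */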

/*
--------------------------------------------------------------------
 This works on all machines.  To be useful, it requires
 -- that the key be an array of uint32_t's, and
 -- that the length be the number of uint32_t's in the key

 The function hashword() is identical to hashlittle() on little-endian
 machines, and identical to hashbig() on big-endian machines,
 except that the length has to be measured in uint32_ts rather than in
 bytes.  hashlittle() is more complicated than hashword() only because
 hashlittle() has to dance around fitting the key bytes into registers.
--------------------------------------------------------------------
*/
uint32_t jenkins_hash32(
const uint32_t *k,                   /* the key, an array of uint32_t values */
size_t          length,               /* the length of the key, in uint32_ts */
uint32_t        initval)         /* the previous hash, or an arbitrary value */
{
  uint32_t a,b,c;

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;

  /*------------------------------------------------- handle most of the key */
  while (length > 3)
  {
    a += k[0];
    b += k[1];
    c += k[2];
    mix(a,b,c);
    length -= 3;
    k += 3;
  }

  /*------------------------------------------- handle the last 3 uint32_t's */
  switch(length)                     /* all the case statements fall through */
  {
  case 3 : c+=k[2];
  case 2 : b+=k[1];
  case 1 : a+=k[0];
    final(a,b,c);
  case 0:     /* case 0: nothing left to add */
    break;
  }
  /*------------------------------------------------------ report the result */
  return c;
}
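
/*
 * Illustrative sketch, not part of the original file: calling
 * jenkins_hash32() on a small array of 32-bit words.  Note that the
 * length argument counts uint32_t's, not bytes, and that the result of
 * one call can be fed back in as initval to chain hashes.  The key
 * values and the JENKINS_HASH_EXAMPLES guard macro are arbitrary and
 * hypothetical, chosen only for illustration.
 */
#ifdef JENKINS_HASH_EXAMPLES
static uint32_t
jenkins_hash32_example(void)
{
  static const uint32_t key[4] = { 1, 2, 3, 4 };
  uint32_t h;

  h = jenkins_hash32(key, 4, 0);          /* 4 words, arbitrary seed 0 */
  h = jenkins_hash32(key, 4, h);          /* chain a second pass via initval */
  return h;
}
#endif /* JENKINS_HASH_EXAMPLES */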

#if BYTE_ORDER == LITTLE_ENDIAN
/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);

By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
code any way you wish, private, educational, or commercial.  It's free.

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable.  Do NOT use for cryptographic purposes.
-------------------------------------------------------------------------------
*/

uint32_t jenkins_hash( const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if ((u.i & 0x3) == 0) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
    case 5 : b+=k[1]&0xff; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff; break;
    case 2 : a+=k[0]&0xffff; break;
    case 1 : a+=k[0]&0xff; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }

  } else if ((u.i & 0x1) == 0) {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t  *k8;

    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12)
    {
      a += k[0] + (((uint32_t)k[1])<<16);
      b += k[2] + (((uint32_t)k[3])<<16);
      c += k[4] + (((uint32_t)k[5])<<16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }

    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[4]+(((uint32_t)k[5])<<16);
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 11: c+=((uint32_t)k8[10])<<16;     /* fall through */
    case 10: c+=k[4];
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 9 : c+=k8[8];                      /* fall through */
    case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 7 : b+=((uint32_t)k8[6])<<16;      /* fall through */
    case 6 : b+=k[2];
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 5 : b+=k8[4];                      /* fall through */
    case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 3 : a+=((uint32_t)k8[2])<<16;      /* fall through */
    case 2 : a+=k[0];
             break;
    case 1 : a+=k8[0];
             break;
    case 0 : return c;                     /* zero length requires no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      a += ((uint32_t)k[1])<<8;
      a += ((uint32_t)k[2])<<16;
      a += ((uint32_t)k[3])<<24;
      b += k[4];
      b += ((uint32_t)k[5])<<8;
      b += ((uint32_t)k[6])<<16;
      b += ((uint32_t)k[7])<<24;
      c += k[8];
      c += ((uint32_t)k[9])<<8;
      c += ((uint32_t)k[10])<<16;
      c += ((uint32_t)k[11])<<24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=((uint32_t)k[11])<<24;
    case 11: c+=((uint32_t)k[10])<<16;
    case 10: c+=((uint32_t)k[9])<<8;
    case 9 : c+=k[8];
    case 8 : b+=((uint32_t)k[7])<<24;
    case 7 : b+=((uint32_t)k[6])<<16;
    case 6 : b+=((uint32_t)k[5])<<8;
    case 5 : b+=k[4];
    case 4 : a+=((uint32_t)k[3])<<24;
    case 3 : a+=((uint32_t)k[2])<<16;
    case 2 : a+=((uint32_t)k[1])<<8;
    case 1 : a+=k[0];
             break;
    case 0 : return c;
    }
  }

  final(a,b,c);
  return c;
}
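
/*
 * Illustrative sketch, not part of the original file: jenkins_hash()
 * above picks a 32-bit, 16-bit, or byte-at-a-time code path based on the
 * alignment of the key, but all three little-endian paths assemble the
 * same byte stream, so an aligned and a deliberately misaligned copy of
 * the same bytes should hash to the same value.  The buffer contents and
 * the JENKINS_HASH_EXAMPLES guard macro are hypothetical.
 */
#ifdef JENKINS_HASH_EXAMPLES
static int
jenkins_hash_alignment_example(void)
{
  uint32_t backing[4];                  /* 4-byte-aligned scratch space */
  char *p = (char *)backing;
  const char key[8] = "abcdefg";        /* 7 characters plus NUL = 8 bytes */
  uint32_t h_aligned, h_unaligned;
  int i;

  for (i = 0; i < 8; i++)               /* copy to an aligned start */
    p[i] = key[i];
  h_aligned = jenkins_hash(p, 8, 0);

  for (i = 0; i < 8; i++)               /* copy to an odd (misaligned) start */
    p[i + 1] = key[i];
  h_unaligned = jenkins_hash(p + 1, 8, 0);

  return (h_aligned == h_unaligned);    /* expected: 1 */
}
#endif /* JENKINS_HASH_EXAMPLES */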

#else /* !(BYTE_ORDER == LITTLE_ENDIAN) */

/*
 * hashbig():
 * This is the same as hashword() on big-endian machines.  It is different
 * from hashlittle() on all machines.  hashbig() takes advantage of
 * big-endian byte ordering.
 */
uint32_t jenkins_hash( const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;
  union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if ((u.i & 0x3) == 0) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff00" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the illegal read is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
    case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff00; break;
    case 2 : a+=k[0]&0xffff0000; break;
    case 1 : a+=k[0]&0xff000000; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += ((uint32_t)k[0])<<24;
      a += ((uint32_t)k[1])<<16;
      a += ((uint32_t)k[2])<<8;
      a += ((uint32_t)k[3]);
      b += ((uint32_t)k[4])<<24;
      b += ((uint32_t)k[5])<<16;
      b += ((uint32_t)k[6])<<8;
      b += ((uint32_t)k[7]);
      c += ((uint32_t)k[8])<<24;
      c += ((uint32_t)k[9])<<16;
      c += ((uint32_t)k[10])<<8;
      c += ((uint32_t)k[11]);
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=k[11];
    case 11: c+=((uint32_t)k[10])<<8;
    case 10: c+=((uint32_t)k[9])<<16;
    case 9 : c+=((uint32_t)k[8])<<24;
    case 8 : b+=k[7];
    case 7 : b+=((uint32_t)k[6])<<8;
    case 6 : b+=((uint32_t)k[5])<<16;
    case 5 : b+=((uint32_t)k[4])<<24;
    case 4 : a+=k[3];
    case 3 : a+=((uint32_t)k[2])<<8;
    case 2 : a+=((uint32_t)k[1])<<16;
    case 1 : a+=((uint32_t)k[0])<<24;
             break;
    case 0 : return c;
    }
  }

  final(a,b,c);
  return c;
}
#endif
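
/*
 * Illustrative sketch, not part of the original file: the two usage
 * patterns described in the hashlittle() comment above -- chaining the
 * hashes of several byte arrays by feeding each result back in as
 * initval, then reducing the 32-bit result to a power-of-two table index
 * with a bitmask (the raw mask stands in for the hashmask() macro, which
 * is not defined in this file).  The strings, lengths, 10-bit table size,
 * and the JENKINS_HASH_EXAMPLES guard macro are arbitrary, hypothetical
 * choices for illustration.
 */
#ifdef JENKINS_HASH_EXAMPLES
static uint32_t
jenkins_hash_table_index_example(void)
{
  static const char *keys[3] = { "alpha", "beta", "gamma" };
  static const size_t lens[3] = { 5, 4, 5 };
  uint32_t h;
  int i;

  /* Chain the hashes of the three strings. */
  h = 0;
  for (i = 0; i < 3; i++)
    h = jenkins_hash(keys[i], lens[i], h);

  /* Keep only the low 10 bits for a 1024-slot hash table. */
  return (h & ((1u << 10) - 1));
}
#endif /* JENKINS_HASH_EXAMPLES */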

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.