The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/ipc/ipc_entry.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
    4  * All Rights Reserved.
    5  * 
    6  * Permission to use, copy, modify and distribute this software and its
    7  * documentation is hereby granted, provided that both the copyright
    8  * notice and this permission notice appear in all copies of the
    9  * software, derivative works or modified versions, and any portions
   10  * thereof, and that both notices appear in supporting documentation.
   11  * 
   12  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   13  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   14  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   15  * 
   16  * Carnegie Mellon requests users of this software to return to
   17  * 
   18  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   19  *  School of Computer Science
   20  *  Carnegie Mellon University
   21  *  Pittsburgh PA 15213-3890
   22  * 
   23  * any improvements or extensions that they make and grant Carnegie Mellon
   24  * the rights to redistribute these changes.
   25  */
   26 /*
   27  * HISTORY
   28  * $Log:        ipc_entry.c,v $
   29  * Revision 2.10  93/01/14  17:32:35  danner
   30  *      64bit cleanup.
   31  *      [92/11/30            af]
   32  * 
   33  * Revision 2.9  92/08/03  17:34:02  jfriedl
   34  *      removed silly prototypes
   35  *      [92/08/02            jfriedl]
   36  * 
   37  * Revision 2.8  92/05/21  17:09:55  jfriedl
   38  *      tried prototypes.
   39  *      [92/05/20            jfriedl]
   40  * 
   41  * Revision 2.7  91/10/09  16:08:15  af
   42  *       Revision 2.6.2.1  91/09/16  10:15:30  rpd
   43  *              Added <ipc/ipc_hash.h>.
   44  *              [91/09/02            rpd]
   45  * 
   46  * Revision 2.6.2.1  91/09/16  10:15:30  rpd
   47  *      Added <ipc/ipc_hash.h>.
   48  *      [91/09/02            rpd]
   49  * 
   50  * Revision 2.6  91/05/14  16:31:38  mrt
   51  *      Correcting copyright
   52  * 
   53  * Revision 2.5  91/03/16  14:47:45  rpd
   54  *      Fixed ipc_entry_grow_table to use it_entries_realloc.
   55  *      [91/03/05            rpd]
   56  * 
   57  * Revision 2.4  91/02/05  17:21:17  mrt
   58  *      Changed to new Mach copyright
   59  *      [91/02/01  15:44:19  mrt]
   60  * 
   61  * Revision 2.3  91/01/08  15:12:58  rpd
   62  *      Removed MACH_IPC_GENNOS.
   63  *      [90/11/08            rpd]
   64  * 
   65  * Revision 2.2  90/06/02  14:49:36  rpd
   66  *      Created for new IPC.
   67  *      [90/03/26  20:54:27  rpd]
   68  * 
   69  */
   70 /*
   71  *      File:   ipc/ipc_entry.c
   72  *      Author: Rich Draves
   73  *      Date:   1989
   74  *
   75  *      Primitive functions to manipulate translation entries.
   76  */
   77 
   78 #include <mach/kern_return.h>
   79 #include <mach/port.h>
   80 #include <kern/assert.h>
   81 #include <kern/sched_prim.h>
   82 #include <kern/zalloc.h>
   83 #include <ipc/port.h>
   84 #include <ipc/ipc_entry.h>
   85 #include <ipc/ipc_space.h>
   86 #include <ipc/ipc_splay.h>
   87 #include <ipc/ipc_hash.h>
   88 #include <ipc/ipc_table.h>
   89 #include <ipc/ipc_object.h>
   90 
/*
 * Zone backing splay-tree entry allocations.
 * NOTE(review): presumably consumed by ite_alloc()/ite_free() used
 * below — confirm against the ite_alloc definition in ipc_entry.h.
 */
zone_t ipc_tree_entry_zone;
   92 
   93 /*
   94  *      Routine:        ipc_entry_tree_collision
   95  *      Purpose:
   96  *              Checks if "name" collides with an allocated name
   97  *              in the space's tree.  That is, returns TRUE
   98  *              if the splay tree contains a name with the same
   99  *              index as "name".
  100  *      Conditions:
  101  *              The space is locked (read or write) and active.
  102  */
  103 
  104 boolean_t
  105 ipc_entry_tree_collision(space, name)
  106         ipc_space_t space;
  107         mach_port_t name;
  108 {
  109         mach_port_index_t index;
  110         mach_port_t lower, upper;
  111 
  112         assert(space->is_active);
  113 
  114         /*
  115          *      Check if we collide with the next smaller name
  116          *      or the next larger name.
  117          */
  118 
  119         ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
  120 
  121         index = MACH_PORT_INDEX(name);
  122         return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
  123                 ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
  124 }
  125 
/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *		Returns IE_NULL if no entry with that name is allocated.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(space, name)
	ipc_space_t space;
	mach_port_t name;
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		/*
		 *	Generation mismatch means this table slot belongs
		 *	to a different name with the same index.  The
		 *	requested name can then only live in the splay
		 *	tree, and only if the slot carries the collision
		 *	bit; otherwise the name isn't allocated at all.
		 */
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			/* right generation, but the slot is free */
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		/* out of table range and the tree is empty */
		entry = IE_NULL;
	else
	    /*
	     *	NB: the collision case above jumps here, into the
	     *	else arm of the outer if.
	     */
	    tree_lookup:
		entry = (ipc_entry_t)
				ipc_splay_tree_lookup(&space->is_tree, name);

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
  166 
  167 /*
  168  *      Routine:        ipc_entry_get
  169  *      Purpose:
  170  *              Tries to allocate an entry out of the space.
  171  *      Conditions:
  172  *              The space is write-locked and active throughout.
  173  *              An object may be locked.  Will not allocate memory.
  174  *      Returns:
  175  *              KERN_SUCCESS            A free entry was found.
  176  *              KERN_NO_SPACE           No entry allocated.
  177  */
  178 
  179 kern_return_t
  180 ipc_entry_get(space, namep, entryp)
  181         ipc_space_t space;
  182         mach_port_t *namep;
  183         ipc_entry_t *entryp;
  184 {
  185         ipc_entry_t table;
  186         mach_port_index_t first_free;
  187         mach_port_t new_name;
  188         ipc_entry_t free_entry;
  189 
  190         assert(space->is_active);
  191 
  192         table = space->is_table;
  193         first_free = table->ie_next;
  194 
  195         if (first_free == 0)
  196                 return KERN_NO_SPACE;
  197 
  198         free_entry = &table[first_free];
  199         table->ie_next = free_entry->ie_next;
  200 
  201         /*
  202          *      Initialize the new entry.  We need only
  203          *      increment the generation number and clear ie_request.
  204          */
  205 
  206     {
  207         mach_port_gen_t gen;
  208 
  209         assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
  210         gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
  211         free_entry->ie_bits = gen;
  212         free_entry->ie_request = 0;
  213         new_name = MACH_PORT_MAKE(first_free, gen);
  214     }
  215 
  216         /*
  217          *      The new name can't be MACH_PORT_NULL because index
  218          *      is non-zero.  It can't be MACH_PORT_DEAD because
  219          *      the table isn't allowed to grow big enough.
  220          *      (See comment in ipc/ipc_table.h.)
  221          */
  222 
  223         assert(MACH_PORT_VALID(new_name));
  224         assert(free_entry->ie_object == IO_NULL);
  225 
  226         *namep = new_name;
  227         *entryp = free_entry;
  228         return KERN_SUCCESS;
  229 }
  230 
  231 /*
  232  *      Routine:        ipc_entry_alloc
  233  *      Purpose:
  234  *              Allocate an entry out of the space.
  235  *      Conditions:
  236  *              The space is not locked before, but it is write-locked after
  237  *              if the call is successful.  May allocate memory.
  238  *      Returns:
  239  *              KERN_SUCCESS            An entry was allocated.
  240  *              KERN_INVALID_TASK       The space is dead.
  241  *              KERN_NO_SPACE           No room for an entry in the space.
  242  *              KERN_RESOURCE_SHORTAGE  Couldn't allocate memory for an entry.
  243  */
  244 
  245 kern_return_t
  246 ipc_entry_alloc(space, namep, entryp)
  247         ipc_space_t space;
  248         mach_port_t *namep;
  249         ipc_entry_t *entryp;
  250 {
  251         kern_return_t kr;
  252 
  253         is_write_lock(space);
  254 
  255         for (;;) {
  256                 if (!space->is_active) {
  257                         is_write_unlock(space);
  258                         return KERN_INVALID_TASK;
  259                 }
  260 
  261                 kr = ipc_entry_get(space, namep, entryp);
  262                 if (kr == KERN_SUCCESS)
  263                         return kr;
  264 
  265                 kr = ipc_entry_grow_table(space);
  266                 if (kr != KERN_SUCCESS)
  267                         return kr; /* space is unlocked */
  268         }
  269 }
  270 
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(space, name, entryp)
	ipc_space_t space;
	mach_port_t name;
	ipc_entry_t *entryp;
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	/* tree entry pre-allocated while unlocked, carried across retries */
	ipc_tree_entry_t tree_entry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			/* discard the unused pre-allocated tree entry */
			if (tree_entry) ite_free(tree_entry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are three cases:
		 *		1) The entry is inuse, for the same name
		 *		2) The entry is inuse, for a different name
		 *		3) The entry is free
		 */

		if ((0 < index) && (index < space->is_table_size)) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (IE_BITS_TYPE(entry->ie_bits)) {
				/*
				 *	Case 1: same generation means the
				 *	entry already exists for this name.
				 *	Case 2 (different generation) falls
				 *	through to the tree logic below.
				 */
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					if (tree_entry) ite_free(tree_entry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 *	(Singly-linked via ie_next starting
				 *	at table[0]; walk until the link
				 *	pointing at index is found.)
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				assert(entry->ie_object == IO_NULL);
				entry->ie_request = 0;

				*entryp = entry;
				if (tree_entry) ite_free(tree_entry);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry->ite_space == space);
			assert(IE_BITS_TYPE(tentry->ite_bits));

			*entryp = &tentry->ite_entry;
			if (tree_entry) ite_free(tree_entry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */

		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tree_entry) ite_free(tree_entry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tree_entry != ITE_NULL) {
			space->is_tree_total++;

			/*
			 *	Mark the shadowed table slot as colliding,
			 *	or count the entry in is_tree_small if it
			 *	would fit in the next-size table without
			 *	colliding there.
			 */
			if (index < space->is_table_size)
				space->is_table[index].ie_bits |=
					IE_BITS_COLLISION;
			else if ((index < its->its_size) &&
				 !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree,
					      name, tree_entry);

			tree_entry->ite_bits = 0;
			tree_entry->ite_object = IO_NULL;
			tree_entry->ite_request = 0;
			tree_entry->ite_space = space;
			*entryp = &tree_entry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 *	(ite_alloc may block, so the lock is dropped.)
		 */

		is_write_unlock(space);
		tree_entry = ite_alloc();
		if (tree_entry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
  452 
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *		The entry may live in the table or in the splay tree;
 *		in the table case, a colliding tree entry (if any) is
 *		migrated into the freed slot instead of freeing it.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(space, name, entry)
	ipc_space_t space;
	mach_port_t name;
	ipc_entry_t entry;
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if ((index < size) && (entry == &table[index])) {
		/* the entry lives in the table */

		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_t tname;
			boolean_t pick;
			ipc_entry_bits_t bits;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			/*
			 *	Split out the tree entries whose index
			 *	equals "index": everything >= index+1 and
			 *	everything < index is split away, leaving
			 *	the colliding names in "collisions".
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			/* copy the picked tree entry into the table slot */

			bits = tentry->ite_bits;
			entry->ie_bits = bits | MACH_PORT_GEN(tname);
			entry->ie_object = obj = tentry->ite_object;
			entry->ie_request = tentry->ite_request;
			assert(tentry->ite_space == space);

			/*
			 *	Send entries are reverse-hashed; rehash
			 *	from the global (tree) table to the local
			 *	(per-space table) one.
			 */
			if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);
		} else {
			/* no collision: push the slot onto the free list */
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}
	} else {
		/* the entry lives in the splay tree */

		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			/*
			 *	The name shadowed a table slot; clear the
			 *	slot's collision bit if no other tree name
			 *	shares this index.
			 */
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;
		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {
			/* entry was counted as movable into the next table */
			assert(space->is_tree_small > 0);
			space->is_tree_small--;
		}
	}
}
  563 
  564 /*
  565  *      Routine:        ipc_entry_grow_table
  566  *      Purpose:
  567  *              Grows the table in a space.
  568  *      Conditions:
  569  *              The space must be write-locked and active before.
  570  *              If successful, it is also returned locked.
  571  *              Allocates memory.
  572  *      Returns:
  573  *              KERN_SUCCESS            Grew the table.
  574  *              KERN_SUCCESS            Somebody else grew the table.
  575  *              KERN_SUCCESS            The space died.
  576  *              KERN_NO_SPACE           Table has maximum size already.
  577  *              KERN_RESOURCE_SHORTAGE  Couldn't allocate a new table.
  578  */
  579 
  580 kern_return_t
  581 ipc_entry_grow_table(space)
  582         ipc_space_t space;
  583 {
  584         ipc_entry_num_t osize, size, nsize;
  585 
  586         do {
  587                 ipc_entry_t otable, table;
  588                 ipc_table_size_t oits, its, nits;
  589                 mach_port_index_t i, free_index;
  590 
  591                 assert(space->is_active);
  592 
  593                 if (space->is_growing) {
  594                         /*
  595                          *      Somebody else is growing the table.
  596                          *      We just wait for them to finish.
  597                          */
  598 
  599                         assert_wait((event_t) space, FALSE);
  600                         is_write_unlock(space);
  601                         thread_block((void (*)()) 0);
  602                         is_write_lock(space);
  603                         return KERN_SUCCESS;
  604                 }
  605 
  606                 otable = space->is_table;
  607                 its = space->is_table_next;
  608                 size = its->its_size;
  609                 oits = its - 1;
  610                 osize = oits->its_size;
  611                 nits = its + 1;
  612                 nsize = nits->its_size;
  613 
  614                 if (osize == size) {
  615                         is_write_unlock(space);
  616                         return KERN_NO_SPACE;
  617                 }
  618 
  619                 assert((osize < size) && (size <= nsize));
  620 
  621                 /*
  622                  *      OK, we'll attempt to grow the table.
  623                  *      The realloc requires that the old table
  624                  *      remain in existence.
  625                  */
  626 
  627                 space->is_growing = TRUE;
  628                 is_write_unlock(space);
  629                 if (it_entries_reallocable(oits))
  630                         table = it_entries_realloc(oits, otable, its);
  631                 else
  632                         table = it_entries_alloc(its);
  633                 is_write_lock(space);
  634                 space->is_growing = FALSE;
  635 
  636                 /*
  637                  *      We need to do a wakeup on the space,
  638                  *      to rouse waiting threads.  We defer
  639                  *      this until the space is unlocked,
  640                  *      because we don't want them to spin.
  641                  */
  642 
  643                 if (table == IE_NULL) {
  644                         is_write_unlock(space);
  645                         thread_wakeup((event_t) space);
  646                         return KERN_RESOURCE_SHORTAGE;
  647                 }
  648 
  649                 if (!space->is_active) {
  650                         /*
  651                          *      The space died while it was unlocked.
  652                          */
  653 
  654                         is_write_unlock(space);
  655                         thread_wakeup((event_t) space);
  656                         it_entries_free(its, table);
  657                         is_write_lock(space);
  658                         return KERN_SUCCESS;
  659                 }
  660 
  661                 assert(space->is_table == otable);
  662                 assert(space->is_table_next == its);
  663                 assert(space->is_table_size == osize);
  664 
  665                 space->is_table = table;
  666                 space->is_table_size = size;
  667                 space->is_table_next = nits;
  668 
  669                 /*
  670                  *      If we did a realloc, it remapped the data.
  671                  *      Otherwise we copy by hand first.  Then we have
  672                  *      to clear the index fields in the old part and
  673                  *      zero the new part.
  674                  */
  675 
  676                 if (!it_entries_reallocable(oits))
  677                         bcopy((char *) otable, (char *) table,
  678                               osize * sizeof(struct ipc_entry));
  679 
  680                 for (i = 0; i < osize; i++)
  681                         table[i].ie_index = 0;
  682 
  683                 bzero((char *) (table + osize),
  684                       (size - osize) * sizeof(struct ipc_entry));
  685 
  686                 /*
  687                  *      Put old entries into the reverse hash table.
  688                  */
  689 
  690                 for (i = 0; i < osize; i++) {
  691                         ipc_entry_t entry = &table[i];
  692 
  693                         if (IE_BITS_TYPE(entry->ie_bits) ==
  694                                                 MACH_PORT_TYPE_SEND)
  695                                 ipc_hash_local_insert(space, entry->ie_object,
  696                                                       i, entry);
  697                 }
  698 
  699                 /*
  700                  *      If there are entries in the splay tree,
  701                  *      then we have work to do:
  702                  *              1) transfer entries to the table
  703                  *              2) update is_tree_small
  704                  */
  705 
  706                 if (space->is_tree_total > 0) {
  707                         mach_port_index_t index;
  708                         boolean_t delete;
  709                         struct ipc_splay_tree ignore;
  710                         struct ipc_splay_tree move;
  711                         struct ipc_splay_tree small;
  712                         ipc_entry_num_t nosmall;
  713                         ipc_tree_entry_t tentry;
  714 
  715                         /*
  716                          *      The splay tree divides into four regions,
  717                          *      based on the index of the entries:
  718                          *              1) 0 <= index < osize
  719                          *              2) osize <= index < size
  720                          *              3) size <= index < nsize
  721                          *              4) nsize <= index
  722                          *
  723                          *      Entries in the first part are ignored.
  724                          *      Entries in the second part, that don't
  725                          *      collide, are moved into the table.
  726                          *      Entries in the third part, that don't
  727                          *      collide, are counted for is_tree_small.
  728                          *      Entries in the fourth part are ignored.
  729                          */
  730 
  731                         ipc_splay_tree_split(&space->is_tree,
  732                                              MACH_PORT_MAKE(nsize, 0),
  733                                              &small);
  734                         ipc_splay_tree_split(&small,
  735                                              MACH_PORT_MAKE(size, 0),
  736                                              &move);
  737                         ipc_splay_tree_split(&move,
  738                                              MACH_PORT_MAKE(osize, 0),
  739                                              &ignore);
  740 
  741                         /* move entries into the table */
  742 
  743                         for (tentry = ipc_splay_traverse_start(&move);
  744                              tentry != ITE_NULL;
  745                              tentry = ipc_splay_traverse_next(&move, delete)) {
  746                                 mach_port_t name;
  747                                 mach_port_gen_t gen;
  748                                 mach_port_type_t type;
  749                                 ipc_entry_bits_t bits;
  750                                 ipc_object_t obj;
  751                                 ipc_entry_t entry;
  752 
  753                                 name = tentry->ite_name;
  754                                 gen = MACH_PORT_GEN(name);
  755                                 index = MACH_PORT_INDEX(name);
  756 
  757                                 assert(tentry->ite_space == space);
  758                                 assert((osize <= index) && (index < size));
  759 
  760                                 entry = &table[index];
  761 
  762                                 /* collision with previously moved entry? */
  763 
  764                                 bits = entry->ie_bits;
  765                                 if (bits != 0) {
  766                                         assert(IE_BITS_TYPE(bits));
  767                                         assert(IE_BITS_GEN(bits) != gen);
  768 
  769                                         entry->ie_bits =
  770                                                 bits | IE_BITS_COLLISION;
  771                                         delete = FALSE;
  772                                         continue;
  773                                 }
  774 
  775                                 bits = tentry->ite_bits;
  776                                 type = IE_BITS_TYPE(bits);
  777                                 assert(type != MACH_PORT_TYPE_NONE);
  778 
  779                                 entry->ie_bits = bits | gen;
  780                                 entry->ie_object = obj = tentry->ite_object;
  781                                 entry->ie_request = tentry->ite_request;
  782 
  783                                 if (type == MACH_PORT_TYPE_SEND) {
  784                                         ipc_hash_global_delete(space, obj,
  785                                                                name, tentry);
  786                                         ipc_hash_local_insert(space, obj,
  787                                                               index, entry);
  788                                 }
  789 
  790                                 space->is_tree_total--;
  791                                 delete = TRUE;
  792                         }
  793                         ipc_splay_traverse_finish(&move);
  794 
  795                         /* count entries for is_tree_small */
  796 
  797                         nosmall = 0; index = 0;
  798                         for (tentry = ipc_splay_traverse_start(&small);
  799                              tentry != ITE_NULL;
  800                              tentry = ipc_splay_traverse_next(&small, FALSE)) {
  801                                 mach_port_index_t nindex;
  802 
  803                                 nindex = MACH_PORT_INDEX(tentry->ite_name);
  804 
  805                                 if (nindex != index) {
  806                                         nosmall++;
  807                                         index = nindex;
  808                                 }
  809                         }
  810                         ipc_splay_traverse_finish(&small);
  811 
  812                         assert(nosmall <= (nsize - size));
  813                         assert(nosmall <= space->is_tree_total);
  814                         space->is_tree_small = nosmall;
  815 
  816                         /* put the splay tree back together */
  817 
  818                         ipc_splay_tree_join(&space->is_tree, &small);
  819                         ipc_splay_tree_join(&space->is_tree, &move);
  820                         ipc_splay_tree_join(&space->is_tree, &ignore);
  821                 }
  822 
  823                 /*
  824                  *      Add entries in the new part which still aren't used
  825                  *      to the free list.  Add them in reverse order,
  826                  *      and set the generation number to -1, so that
  827                  *      early allocations produce "natural" names.
  828                  */
  829 
  830                 free_index = table[0].ie_next;
  831                 for (i = size-1; i >= osize; --i) {
  832                         ipc_entry_t entry = &table[i];
  833 
  834                         if (entry->ie_bits == 0) {
  835                                 entry->ie_bits = IE_BITS_GEN_MASK;
  836                                 entry->ie_next = free_index;
  837                                 free_index = i;
  838                         }
  839                 }
  840                 table[0].ie_next = free_index;
  841 
  842                 /*
  843                  *      Now we need to free the old table.
  844                  *      If the space dies or grows while unlocked,
  845                  *      then we can quit here.
  846                  */
  847 
  848                 is_write_unlock(space);
  849                 thread_wakeup((event_t) space);
  850                 it_entries_free(oits, otable);
  851                 is_write_lock(space);
  852                 if (!space->is_active || (space->is_table_next != nits))
  853                         return KERN_SUCCESS;
  854 
  855                 /*
  856                  *      We might have moved enough entries from
  857                  *      the splay tree into the table that
  858                  *      the table can be profitably grown again.
  859                  *
  860                  *      Note that if size == nsize, then
  861                  *      space->is_tree_small == 0.
  862                  */
  863         } while ((space->is_tree_small > 0) &&
  864                  (((nsize - size) * sizeof(struct ipc_entry)) <
  865                   (space->is_tree_small * sizeof(struct ipc_tree_entry))));
  866 
  867         return KERN_SUCCESS;
  868 }

Cache object: 409d9a71db49ac3068671b76bf2d74b5


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.