The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/savearea.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*
   23  *      This file is used to maintain the exception save areas
   24  *
   25  */
   26 
   27 #include <debug.h>
   28 #include <mach_kgdb.h>
   29 #include <mach_vm_debug.h>
   30 
   31 #include <kern/thread.h>
   32 #include <mach/vm_attributes.h>
   33 #include <mach/vm_param.h>
   34 #include <vm/vm_kern.h>
   35 #include <vm/vm_map.h>
   36 #include <vm/vm_page.h>
   37 #include <mach/ppc/thread_status.h>
   38 #include <kern/spl.h>
   39 #include <kern/simple_lock.h>
   40 
   41 #include <kern/misc_protos.h>
   42 #include <ppc/misc_protos.h>
   43 #include <ppc/proc_reg.h>
   44 #include <ppc/mem.h>
   45 #include <ppc/pmap.h>
   46 #include <ppc/Firmware.h>
   47 #include <ppc/mappings.h>
   48 #include <ppc/exception.h>
   49 #include <ppc/savearea.h>
   50 #include <ddb/db_output.h>
   51 
   52 
extern struct	Saveanchor saveanchor;					/* Aligned savearea anchor; defined in low-memory/assembly, shared with the exception vectors */
struct Saveanchor backpocket;						/* Snapshot of saveanchor covering only the emergency (back pocket) saveareas */
unsigned int	debsave0 = 0;						/* Debug flag (appears unused here; presumably poked from a debugger) */
unsigned int	backchain = 0;						/* Debug flag (appears unused here; presumably poked from a debugger) */
   57 
   58 /*
   59  *              These routines keep track of exception save areas and keeps the count within specific limits.  If there are
   60  *              too few, more are allocated, too many, and they are released. This savearea is where the PCBs are
   61  *              stored.  They never span a page boundary and are referenced by both virtual and real addresses.
   62  *              Within the interrupt vectors, the real address is used because at that level, no exceptions
    63  *              can be tolerated.  Save areas can be dynamic or permanent.  Permanent saveareas are allocated
   64  *              at boot time and must be in place before any type of exception occurs.  These are never released,
   65  *              and the number is based upon some arbitrary (yet to be determined) amount times the number of
   66  *              processors.  This represents the minimum number required to process a total system failure without
   67  *              destroying valuable and ever-so-handy system debugging information.
   68  *
   69  *              We keep two global free lists (the savearea free pool and the savearea free list) and one local
   70  *              list per processor.
   71  *
   72  *              The local lists are small and require no locked access.  They are chained using physical addresses
   73  *              and no interruptions are allowed when adding to or removing from the list. Also known as the 
   74  *              qfret list. This list is local to a processor and is intended for use only by very low level
   75  *              context handling code. 
   76  *
   77  *              The savearea free list is a medium size list that is globally accessible.  It is updated
   78  *              while holding a simple lock. The length of time that the lock is held is kept short.  The
   79  *              longest period of time is when the list is trimmed. Like the qfret lists, this is chained physically
   80  *              and must be accessed with translation and interruptions disabled. This is where the bulk
   81  *              of the free entries are located.
   82  *
   83  *              The saveareas are allocated from full pages.  A pool element is marked
   84  *              with an allocation map that shows which "slots" are free.  These pages are allocated via the
   85  *              normal kernel memory allocation functions. Queueing is with physical addresses.  The enqueue,
    86  *              dequeue, and search for free blocks is done under the free list lock.  A pool page is
    87  *              eligible for allocation only if there are empty slots in it.
   88  *
    89  *              Saveareas are counted as "in use" once they are removed from the savearea free list.
   90  *              This means that all areas on the local qfret list are considered in use.
   91  *
   92  *              There are two methods of obtaining a savearea.  The save_get function (which is also inlined
   93  *              in the low-level exception handler) attempts to get an area from the local qfret list.  This is
    94  *              done completely without locks.  If qfret is exhausted (or maybe just too low) an area is allocated
   95  *              from the savearea free list. If the free list is empty, we install the back pocket areas and
   96  *              panic.
   97  *
   98  *              The save_alloc function is designed to be called by high level routines, e.g., thread creation,
   99  *              etc.  It will allocate from the free list.  After allocation, it will compare the free count
  100  *              to the target value.  If outside of the range, it will adjust the size either upwards or
  101  *              downwards.
  102  *
  103  *              If we need to shrink the list, it will be trimmed to the target size and unlocked.  The code
  104  *              will walk the chain and return each savearea to its pool page.  If a pool page becomes
  105  *              completely empty, it is dequeued from the free pool list and enqueued (atomic queue
  106  *              function) to be released.
  107  *
  108  *              Once the trim list is finished, the pool release queue is checked to see if there are pages
  109  *              waiting to be released. If so, they are released one at a time.
  110  *
  111  *              If the free list needed to be grown rather than shrunken, we will first attempt to recover
  112  *              a page from the pending release queue (built when we trim the free list).  If we find one,
  113  *              it is allocated, otherwise, a page of kernel memory is allocated.  This loops until there are
  114  *              enough free saveareas.
  115  *              
  116  */
  117 
  118 
  119 
  120 /*
  121  *              Allocate our initial context save areas.  As soon as we do this,
  122  *              we can take an interrupt. We do the saveareas here, 'cause they're guaranteed
  123  *              to be at least page aligned.
  124  *
  125  *              Note: these initial saveareas are all to be allocated from V=R, less than 4GB
  126  *              space.
  127  */
  128 
  129 
  130 void savearea_init(vm_offset_t addr) {
  131 
  132         savearea_comm   *savec;
  133         vm_offset_t     save;
  134         int i;
  135 
  136         
  137         saveanchor.savetarget   = InitialSaveTarget;            /* Initial target value */
  138         saveanchor.saveinuse    = 0;                                            /* Number of areas in use */
  139 
  140         saveanchor.savefree    = 0;                                                     /* Remember the start of the free chain */
  141         saveanchor.savefreecnt = 0;                                                     /* Remember the length */
  142         saveanchor.savepoolfwd = (addr64_t)&saveanchor;         /* Remember pool forward */
  143         saveanchor.savepoolbwd = (addr64_t)&saveanchor;         /* Remember pool backward */
  144 
  145         save =  addr;                                                                           /* Point to the whole block of blocks */        
  146 
  147 /*
  148  *      First we allocate the back pocket in case of emergencies
  149  */
  150 
  151 
  152         for(i=0; i < BackPocketSaveBloks; i++) {                        /* Initialize the back pocket saveareas */
  153 
  154                 savec = (savearea_comm *)save;                                  /* Get the control area for this one */
  155 
  156                 savec->sac_alloc = 0;                                                   /* Mark it allocated */
  157                 savec->sac_vrswap = 0;                                                  /* V=R, so the translation factor is 0 */
  158                 savec->sac_flags = sac_perm;                                    /* Mark it permanent */
  159                 savec->sac_flags |= 0x0000EE00;                                 /* Debug eyecatcher */
  160                 save_queue((uint32_t)savec >> 12);                              /* Add page to savearea lists */
  161                 save += PAGE_SIZE;                                                              /* Jump up to the next one now */
  162         
  163         }
  164 
  165         backpocket = saveanchor;                                                        /* Save this for emergencies */
  166 
  167 
  168 /*
  169  *      We've saved away the back pocket savearea info, so reset it all and
  170  *      now allocate for real
  171  */
  172 
  173 
  174         saveanchor.savefree = 0;                                                        /* Remember the start of the free chain */
  175         saveanchor.savefreecnt = 0;                                                     /* Remember the length */
  176         saveanchor.saveadjust = 0;                                                      /* Set none needed yet */
  177         saveanchor.savepoolfwd = (addr64_t)&saveanchor;         /* Remember pool forward */
  178         saveanchor.savepoolbwd = (addr64_t)&saveanchor;         /* Remember pool backward */
  179 
  180         for(i=0; i < InitialSaveBloks; i++) {                           /* Initialize the saveareas */
  181 
  182                 savec = (savearea_comm *)save;                                  /* Get the control area for this one */
  183 
  184                 savec->sac_alloc = 0;                                                   /* Mark it allocated */
  185                 savec->sac_vrswap = 0;                                                  /* V=R, so the translation factor is 0 */
  186                 savec->sac_flags = sac_perm;                                    /* Mark it permanent */
  187                 savec->sac_flags |= 0x0000EE00;                                 /* Debug eyecatcher */
  188                 save_queue((uint32_t)savec >> 12);                              /* Add page to savearea lists */
  189                 save += PAGE_SIZE;                                                              /* Jump up to the next one now */
  190         
  191         }
  192 
  193 /*
  194  *      We now have a free list that has our initial number of entries  
  195  *      The local qfret lists is empty.  When we call save_get below it will see that
  196  *      the local list is empty and fill it for us.
  197  *
  198  *      It is ok to call save_get here because all initial saveareas are V=R in less
  199  *  than 4GB space, so 32-bit addressing is ok.
  200  *
  201  */
  202 
  203 /*
  204  * This will populate the local list  and get the first one for the system
  205  */     
  206         getPerProc()->next_savearea = (vm_offset_t)save_get();
  207 
  208 /*
  209  *      The system is now able to take interruptions
  210  */
  211         return;
  212 }
  213 
  214 
  215 
  216 
  217 /*
  218  *              Obtains a savearea.  If the free list needs size adjustment it happens here.
  219  *              Don't actually allocate the savearea until after the adjustment is done.
  220  */
  221 
  222 struct savearea *save_alloc(void) {                                             /* Reserve a save area */
  223         
  224         
  225         if(saveanchor.saveadjust) save_adjust();                        /* If size need adjustment, do it now */
  226         
  227         return save_get();                                                                      /* Pass the baby... */
  228 }
  229 
  230 
  231 /*
  232  *              This routine releases a save area to the free queue.  If after that, we have more than our maximum target,
  233  *              we start releasing what we can until we hit the normal target. 
  234  */
  235 
  236 
  237 
  238 void save_release(struct savearea *save) {                              /* Release a save area */
  239         
  240         save_ret(save);                                                                         /* Return a savearea to the free list */
  241         
  242         if(saveanchor.saveadjust) save_adjust();                        /* Adjust the savearea free list and pool size if needed */
  243         
  244         return;
  245         
  246 }
  247 
  248 
  249 /*
  250  *              Adjusts the size of the free list.  Can either release or allocate full pages
  251  *              of kernel memory.  This can block.
  252  *
  253  *              Note that we will only run one adjustment and the amount needed may change
  254  *              while we are executing.
  255  *
  256  *              Calling this routine is triggered by saveanchor.saveadjust.  This value is always calculated just before
  257  *              we unlock the saveanchor lock (this keeps it pretty accurate).  If the total of savefreecnt and saveinuse
  258  *              is within the hysteresis range, it is set to 0.  If outside, it is set to the number needed to bring
  259  *              the total to the target value.  Note that there is a minimum size to the free list (FreeListMin) and if
  260  *              savefreecnt falls below that, saveadjust is set to the number needed to bring it to that.
  261  */
  262 
  263 
  264 void save_adjust(void) {
  265         
  266         savearea_comm   *sctl, *sctlnext, *freepage;
  267         kern_return_t ret;
  268         uint64_t vtopmask;
  269         ppnum_t physpage;
  270 
  271         if(saveanchor.saveadjust < 0)                                   {       /* Do we need to adjust down? */
  272                         
  273                 sctl = (savearea_comm *)save_trim_free();               /* Trim list to the need count, return start of trim list */
  274                                 
  275                 while(sctl) {                                                                   /* Release the free pages back to the kernel */
  276                         sctlnext = CAST_DOWN(savearea_comm *, sctl->save_prev); /* Get next in list */  
  277                         kmem_free(kernel_map, (vm_offset_t) sctl, PAGE_SIZE);   /* Release the page */
  278                         sctl = sctlnext;                                                        /* Chain onwards */
  279                 }
  280         }
  281         else {                                                                                          /* We need more... */
  282 
  283                 if(save_recover()) return;                                              /* If we can recover enough from the pool, return */
  284                 
  285                 while(saveanchor.saveadjust > 0) {                              /* Keep going until we have enough */
  286 
  287                         ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&freepage, PAGE_SIZE);        /* Get a page for free pool */
  288                         if(ret != KERN_SUCCESS) {                                       /* Did we get some memory? */
  289                                 panic("Whoops...  Not a bit of wired memory left for saveareas\n");
  290                         }
  291                         
  292                         physpage = pmap_find_phys(kernel_pmap, (vm_offset_t)freepage);  /* Find physical page */
  293                         if(!physpage) {                                                         /* See if we actually have this mapped*/
  294                                 panic("save_adjust: wired page not mapped - va = %08X\n", freepage);    /* Die */
  295                         }
  296                         
  297                         bzero((void *)freepage, PAGE_SIZE);                     /* Clear it all to zeros */
  298                         freepage->sac_alloc = 0;                                        /* Mark all entries taken */
  299                         freepage->sac_vrswap = ((uint64_t)physpage << 12) ^ (uint64_t)((uintptr_t)freepage);    /* XOR to calculate conversion mask */
  300         
  301                         freepage->sac_flags |= 0x0000EE00;                      /* Set debug eyecatcher */
  302                                                 
  303                         save_queue(physpage);                                           /* Add all saveareas on page to free list */
  304                 }
  305         }
  306 }
  307 
  308 /*
  309  *              Fake up information to make the saveareas look like a zone
  310  */
  311 void
  312 save_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
  313                     vm_size_t *alloc_size, int *collectable, int *exhaustable)
  314 {
  315         *count      = saveanchor.saveinuse;
  316         *cur_size   = (saveanchor.savefreecnt + saveanchor.saveinuse) * (PAGE_SIZE / sac_cnt);
  317         *max_size   = saveanchor.savemaxcount * (PAGE_SIZE / sac_cnt);
  318         *elem_size  = sizeof(savearea);
  319         *alloc_size = PAGE_SIZE;
  320         *collectable = 1;
  321         *exhaustable = 0;
  322 }
  323 
  324 

Cache object: d19196378222908be0f80b9575fbb083


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.