The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/read_fault.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /*
   26  * @OSF_COPYRIGHT@
   27  */
   28 /* 
   29  * Mach Operating System
   30  * Copyright (c) 1991,1990 Carnegie Mellon University
   31  * All Rights Reserved.
   32  * 
   33  * Permission to use, copy, modify and distribute this software and its
   34  * documentation is hereby granted, provided that both the copyright
   35  * notice and this permission notice appear in all copies of the
   36  * software, derivative works or modified versions, and any portions
   37  * thereof, and that both notices appear in supporting documentation.
   38  * 
   39  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   40  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   41  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   42  * 
   43  * Carnegie Mellon requests users of this software to return to
   44  * 
   45  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   46  *  School of Computer Science
   47  *  Carnegie Mellon University
   48  *  Pittsburgh PA 15213-3890
   49  * 
   50  * any improvements or extensions that they make and grant Carnegie Mellon
   51  * the rights to redistribute these changes.
   52  */
   53 /*
   54  */
   55 
   56 #include <kern/thread.h>
   57 #include <vm/vm_fault.h>
   58 #include <mach/kern_return.h>
   59 #include <mach/vm_behavior.h>
   60 #include <vm/vm_map.h>
   61 #include <vm/vm_object.h>
   62 #include <vm/vm_page.h>
   63 #include <vm/pmap.h>
   64 
   65 #include <i386/intel_read_fault.h>
   66 
   67 #include <kern/macro_help.h>
   68 
   69 /*
   70  *      Expansion of vm_fault for read fault in kernel mode.
   71  *      Must enter the mapping as writable, since the i386
    72  * (and i860 in i386 compatibility mode) ignores write
   73  *      protection in kernel mode.
   74  *
   75  *      Note that this routine can be called for pmap's other
   76  *      than the kernel_pmap, in which case it just enters
   77  *      a read-only mapping.  (See e.g. kernel_trap().)
   78  */
   79 kern_return_t
   80 intel_read_fault(
   81         vm_map_t        map,
   82         vm_offset_t     vaddr)
   83 {
   84         vm_map_version_t        version;        /* Map version for
   85                                                    verification */
   86         vm_object_t             object;         /* Top-level object */
   87         vm_object_offset_t      offset;         /* Top-level offset */
   88         vm_prot_t               prot;           /* Protection for mapping */
   89         vm_behavior_t           behavior;       /* Expected paging behavior */
   90         vm_object_offset_t      lo_offset, hi_offset;
   91         vm_page_t               result_page;    /* Result of vm_fault_page */
   92         vm_page_t               top_page;       /* Placeholder page */
   93         boolean_t               wired;          /* Is map region wired? */
   94         kern_return_t           result;
   95         register vm_page_t      m;
   96         vm_map_t                pmap_map;
   97         vm_map_t                original_map = map;
   98         thread_t                cur_thread;
   99         boolean_t               funnel_set;
  100         funnel_t                *curflock;
  101 
  102         cur_thread = current_thread();
  103 
  104         if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
  105                 funnel_set = TRUE;
  106                 curflock = cur_thread->funnel_lock;
  107                 thread_funnel_set( curflock , FALSE);
  108         } else {
  109                 funnel_set = FALSE;
  110         }
  111 
  112     RetryFault:
  113 
  114         map = original_map;
  115 
  116         /*
  117          *      Find the backing store object and offset into it
  118          *      to begin search.
  119          */
  120         vm_map_lock_read(map);
  121         result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
  122                                       &object, &offset, &prot, &wired,
  123                                       &behavior, &lo_offset, 
  124                                       &hi_offset, &pmap_map);
  125         
  126         vm_map_unlock_read(map);
  127 
  128         if (result != KERN_SUCCESS) {
  129                 if (funnel_set)
  130                         thread_funnel_set( curflock, TRUE);
  131                 return (result);
  132         }
  133 
  134         if(pmap_map != map) {
  135                 vm_map_reference(pmap_map);
  136                 vm_map_unlock_read(pmap_map);
  137         }
  138 
  139         /*
  140          *      Make a reference to this object to prevent its
  141          *      disposal while we are playing with it.
  142          */
  143         assert(object->ref_count > 0);
  144         object->ref_count++;
  145         vm_object_res_reference(object);
  146         vm_object_paging_begin(object);
  147 
  148         result = vm_fault_page(object, offset, VM_PROT_READ, FALSE,
  149                                THREAD_ABORTSAFE,
  150                                lo_offset, hi_offset, behavior,
  151                                &prot, &result_page, &top_page, (int *)0,
  152                                0, map->no_zero_fill, FALSE, map, vaddr);
  153 
  154         if (result != VM_FAULT_SUCCESS) {
  155             vm_object_deallocate(object);
  156             if(pmap_map != map) {
  157                         vm_map_deallocate(pmap_map);
  158            }
  159 
  160             switch (result) {
  161                 case VM_FAULT_RETRY:
  162                     goto RetryFault;
  163                 case VM_FAULT_INTERRUPTED:
  164                         if (funnel_set)
  165                                 thread_funnel_set( curflock, TRUE);
  166                     return (KERN_SUCCESS);
  167                 case VM_FAULT_MEMORY_SHORTAGE:
  168                     VM_PAGE_WAIT();
  169                     goto RetryFault;
  170                 case VM_FAULT_FICTITIOUS_SHORTAGE:
  171                     vm_page_more_fictitious();
  172                     goto RetryFault;
  173                 case VM_FAULT_MEMORY_ERROR:
  174                     return (KERN_MEMORY_ERROR);
  175             }
  176         }
  177 
  178         m = result_page;
  179 
  180         /*
  181          *      How to clean up the result of vm_fault_page.  This
  182          *      happens whether the mapping is entered or not.
  183          */
  184 
  185 #define UNLOCK_AND_DEALLOCATE                           \
  186         MACRO_BEGIN                                     \
  187         vm_fault_cleanup(m->object, top_page);          \
  188         vm_object_deallocate(object);                   \
  189         MACRO_END
  190 
  191         /*
  192          *      What to do with the resulting page from vm_fault_page
  193          *      if it doesn't get entered into the physical map:
  194          */
  195 
  196 #define RELEASE_PAGE(m)                                 \
  197         MACRO_BEGIN                                     \
  198         PAGE_WAKEUP_DONE(m);                            \
  199         vm_page_lock_queues();                          \
  200         if (!m->active && !m->inactive)                 \
  201                 vm_page_activate(m);                    \
  202         vm_page_unlock_queues();                        \
  203         MACRO_END
  204 
  205         /*
  206          *      We must verify that the maps have not changed.
  207          */
  208         vm_object_unlock(m->object);
  209 
  210         if ((map != original_map) || !vm_map_verify(map, &version)) {
  211             vm_object_t         retry_object;
  212             vm_object_offset_t  retry_offset;
  213             vm_prot_t           retry_prot;
  214 
  215                 if (map != pmap_map) {
  216                         vm_map_deallocate(pmap_map);
  217                 }
  218             
  219                 map = original_map;
  220                 vm_map_lock_read(map);
  221 
  222             result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
  223                                 &retry_object, &retry_offset, &retry_prot,
  224                                 &wired, &behavior, &lo_offset, 
  225                                 &hi_offset, &pmap_map);
  226 
  227             if (result != KERN_SUCCESS) {
  228                 vm_map_unlock_read(map);
  229                         vm_object_lock(m->object);
  230                         RELEASE_PAGE(m);
  231                         UNLOCK_AND_DEALLOCATE;
  232                         if (funnel_set)
  233                                 thread_funnel_set( curflock, TRUE);
  234                         return (result);
  235             }
  236 
  237                 if (map != pmap_map) {
  238                         vm_map_reference(pmap_map);
  239                 }
  240 
  241             vm_object_unlock(retry_object);
  242 
  243             if (retry_object != object || retry_offset != offset) {
  244                         vm_object_lock(m->object);
  245                         RELEASE_PAGE(m);
  246                 vm_map_unlock_read(map);
  247                 if(pmap_map != map) {
  248                                 vm_map_unlock_read(pmap_map);
  249                                 vm_map_deallocate(pmap_map);
  250                         }
  251                         UNLOCK_AND_DEALLOCATE;
  252                         goto RetryFault;
  253             }
  254         }
  255 
  256         /*
  257          *      Put the page in the physical map.
  258          */
  259 
  260         PMAP_ENTER(pmap_map->pmap, vaddr, m, VM_PROT_READ, PMAP_DEFAULT_CACHE, wired);
  261 
  262         if(pmap_map != map) {
  263                 vm_map_unlock_read(pmap_map);
  264                 vm_map_deallocate(pmap_map);
  265         }
  266         
  267         vm_object_lock(m->object);
  268         vm_page_lock_queues();
  269         if (!m->active && !m->inactive)
  270                 vm_page_activate(m);
  271         m->reference = TRUE;
  272         vm_page_unlock_queues();
  273 
  274         vm_map_verify_done(map, &version);
  275         PAGE_WAKEUP_DONE(m);
  276 
  277         UNLOCK_AND_DEALLOCATE;
  278 
  279 #undef  UNLOCK_AND_DEALLOCATE
  280 #undef  RELEASE_PAGE
  281         if (funnel_set)
  282                 thread_funnel_set( curflock, TRUE);
  283         return (KERN_SUCCESS);
  284 }
  285 

Cache object: 8a8a45fb4c99bc0f59792b43a4a83580


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.