FreeBSD/Linux Kernel Cross Reference
sys/intel/read_fault.c

/* 
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 * 
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 * 
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 * 
 * Carnegie Mellon requests users of this software to return to
 * 
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * 
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log:        read_fault.c,v $
 * Revision 2.2  91/06/06  16:57:48  jsb
 *      First checkin as intel/read_fault.c.
 * 
 * Revision 2.7  91/05/18  14:30:09  rpd
 *      Picked up software_reference_bits-related changes from vm_fault.
 *      [91/04/06            rpd]
 *      Added VM_FAULT_FICTITIOUS_SHORTAGE.
 *      [91/03/29            rpd]
 * 
 * Revision 2.6  91/05/14  16:15:46  mrt
 *      Correcting copyright
 * 
 * Revision 2.5  91/03/16  14:45:13  rpd
 *      Added resume, continuation arguments to vm_fault_page.
 *      Added continuation argument to VM_PAGE_WAIT.
 *      [91/02/05            rpd]
 * 
 * Revision 2.4  91/02/05  17:14:21  mrt
 *      Changed to new Mach copyright
 *      [91/02/01  17:37:39  mrt]
 * 
 * Revision 2.3  91/01/08  15:11:05  rpd
 *      Changed VM_WAIT to VM_PAGE_WAIT.
 *      [90/12/11            rpd]
 * 
 * Revision 2.2  90/05/03  15:37:20  dbg
 *      Created.
 *      [90/04/05            dbg]
 * 
 */

#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <kern/macro_help.h>

/*
 *      Expansion of vm_fault for read fault in kernel mode.
 *      Must enter the mapping as writable, since the i386
 *      (and i860 in i386 compatibility mode) ignores write
 *      protection in kernel mode.
 */
kern_return_t
intel_read_fault(map, vaddr)
        vm_map_t        map;
        vm_offset_t     vaddr;
{
        vm_map_version_t        version;        /* Map version for
                                                   verification */
        vm_object_t             object;         /* Top-level object */
        vm_offset_t             offset;         /* Top-level offset */
        vm_prot_t               prot;           /* Protection for mapping */
        vm_page_t               result_page;    /* Result of vm_fault_page */
        vm_page_t               top_page;       /* Placeholder page */
        boolean_t               wired;          /* Is map region wired? */
        boolean_t               su;
        kern_return_t           result;
        register vm_page_t      m;

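        /*
         *      Overview: look up the object backing vaddr, fault the
         *      page in with vm_fault_page, re-check the map version
         *      (the map is not kept locked across the fault), and
         *      finally enter the page into the physical map as
         *      writable.  Any transient failure restarts the whole
         *      sequence at RetryFault.
         */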
    RetryFault:

        /*
         *      Find the backing store object and offset into it
         *      to begin search.
         */
        result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
                        &object, &offset, &prot, &wired, &su);
        if (result != KERN_SUCCESS)
            return (result);

        /*
         *      Make a reference to this object to prevent its
         *      disposal while we are playing with it.
         */
        assert(object->ref_count > 0);
        object->ref_count++;
        vm_object_paging_begin(object);

        result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
                               &prot, &result_page, &top_page,
                               FALSE, (void (*)()) 0);

        if (result != VM_FAULT_SUCCESS) {
            vm_object_deallocate(object);

            switch (result) {
                case VM_FAULT_RETRY:
                    goto RetryFault;
                case VM_FAULT_INTERRUPTED:
                    return (KERN_SUCCESS);
                case VM_FAULT_MEMORY_SHORTAGE:
                    VM_PAGE_WAIT((void (*)()) 0);
                    goto RetryFault;
                case VM_FAULT_FICTITIOUS_SHORTAGE:
                    vm_page_more_fictitious();
                    goto RetryFault;
                case VM_FAULT_MEMORY_ERROR:
                    return (KERN_MEMORY_ERROR);
            }
        }

        m = result_page;

        /*
         *      How to clean up the result of vm_fault_page.  This
         *      happens whether the mapping is entered or not.
         */

#define UNLOCK_AND_DEALLOCATE                           \
        MACRO_BEGIN                                     \
        vm_fault_cleanup(m->object, top_page);          \
        vm_object_deallocate(object);                   \
        MACRO_END

        /*
         *      What to do with the resulting page from vm_fault_page
         *      if it doesn't get entered into the physical map:
         */

#define RELEASE_PAGE(m)                                 \
        MACRO_BEGIN                                     \
        PAGE_WAKEUP_DONE(m);                            \
        vm_page_lock_queues();                          \
        if (!m->active && !m->inactive)                 \
                vm_page_activate(m);                    \
        vm_page_unlock_queues();                        \
        MACRO_END
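        /*
         *      Note that every use of these macros below first takes
         *      the lock on m->object (see the error paths in the
         *      verification loop and the normal exit path).
         */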

        /*
         *      We must verify that the maps have not changed since the
         *      lookup above; if they have, redo the lookup and, if the
         *      translation has changed, retry the entire fault.
         */
        vm_object_unlock(m->object);
        while (!vm_map_verify(map, &version)) {
            vm_object_t         retry_object;
            vm_offset_t         retry_offset;
            vm_prot_t           retry_prot;

            result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
                                &retry_object, &retry_offset, &retry_prot,
                                &wired, &su);
            if (result != KERN_SUCCESS) {
                vm_object_lock(m->object);
                RELEASE_PAGE(m);
                UNLOCK_AND_DEALLOCATE;
                return (result);
            }

            vm_object_unlock(retry_object);

            if (retry_object != object || retry_offset != offset) {
                vm_object_lock(m->object);
                RELEASE_PAGE(m);
                UNLOCK_AND_DEALLOCATE;
                goto RetryFault;
            }
        }

        /*
         *      Put the page in the physical map.
         */
        PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ|VM_PROT_WRITE, wired);

        vm_object_lock(m->object);
        vm_page_lock_queues();
        if (!m->active && !m->inactive)
                vm_page_activate(m);
        m->reference = TRUE;
        vm_page_unlock_queues();

        vm_map_verify_done(map, &version);
        PAGE_WAKEUP_DONE(m);

        UNLOCK_AND_DEALLOCATE;

#undef  UNLOCK_AND_DEALLOCATE
#undef  RELEASE_PAGE

        return (KERN_SUCCESS);
}
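
For orientation, the sketch below shows roughly how a kernel-mode page-fault handler might hand a faulting address to intel_read_fault(). It is only an illustration: the function name handle_kernel_read_fault, the use of kernel_map, and the trunc_page() rounding are assumptions made for this example and are not taken from the Mach i386 trap code; only intel_read_fault(map, vaddr) and its kern_return_t result come from the listing above.

/*
 * Hypothetical caller (not part of this file): dispatch a read fault
 * taken in kernel mode to intel_read_fault().  The choice of map and
 * the page rounding below are assumptions for illustration only.
 */
kern_return_t
handle_kernel_read_fault(vaddr)
        vm_offset_t     vaddr;          /* faulting address, e.g. from %cr2 */
{
        kern_return_t   kr;

        /* Assumption: a kernel-mode read fault is resolved in kernel_map. */
        kr = intel_read_fault(kernel_map, trunc_page(vaddr));

        /*
         * On success intel_read_fault() has already entered the page
         * (writable) via PMAP_ENTER, so the faulting instruction can
         * simply be restarted by the trap handler.
         */
        return (kr);
}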
