1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: read_fault.c,v $
29 * Revision 2.3 93/11/17 16:55:40 dbg
30 * Removed 'su' argument from call to vm_map_lookup.
31 * [93/06/17 dbg]
32 *
33 * Revision 2.2 91/06/06 16:57:48 jsb
34 * First checkin as intel/read_fault.c.
35 *
36 * Revision 2.7 91/05/18 14:30:09 rpd
37 * Picked up software_reference_bits-related changes from vm_fault.
38 * [91/04/06 rpd]
39 * Added VM_FAULT_FICTITIOUS_SHORTAGE.
40 * [91/03/29 rpd]
41 *
42 * Revision 2.6 91/05/14 16:15:46 mrt
43 * Correcting copyright
44 *
45 * Revision 2.5 91/03/16 14:45:13 rpd
46 * Added resume, continuation arguments to vm_fault_page.
47 * Added continuation argument to VM_PAGE_WAIT.
48 * [91/02/05 rpd]
49 *
50 * Revision 2.4 91/02/05 17:14:21 mrt
51 * Changed to new Mach copyright
52 * [91/02/01 17:37:39 mrt]
53 *
54 * Revision 2.3 91/01/08 15:11:05 rpd
55 * Changed VM_WAIT to VM_PAGE_WAIT.
56 * [90/12/11 rpd]
57 *
58 * Revision 2.2 90/05/03 15:37:20 dbg
59 * Created.
60 * [90/04/05 dbg]
61 *
62 */
63
64 #include <vm/vm_fault.h>
65 #include <mach/kern_return.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/pmap.h>
70
71 #include <kern/macro_help.h>
72
73 /*
74 * Expansion of vm_fault for read fault in kernel mode.
75 * Must enter the mapping as writable, since the i386
76 * (and i860 in i386 compatibility mode) ignores write
77 * protection in kernel mode.
78 */
79 kern_return_t
80 intel_read_fault(
81 vm_map_t map,
82 vm_offset_t vaddr)
83 {
84 vm_map_version_t version; /* Map version for
85 verification */
86 vm_object_t object; /* Top-level object */
87 vm_offset_t offset; /* Top-level offset */
88 vm_prot_t prot; /* Protection for mapping */
89 vm_page_t result_page; /* Result of vm_fault_page */
90 vm_page_t top_page; /* Placeholder page */
91 boolean_t wired; /* Is map region wired? */
92 kern_return_t result;
93 register vm_page_t m;
94
95 RetryFault:
96
97 /*
98 * Find the backing store object and offset into it
99 * to begin search.
100 */
101 result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
102 &object, &offset, &prot, &wired);
103 if (result != KERN_SUCCESS)
104 return result;
105
106 /*
107 * Make a reference to this object to prevent its
108 * disposal while we are playing with it.
109 */
110 assert(object->ref_count > 0);
111 object->ref_count++;
112 vm_object_paging_begin(object);
113
114 result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
115 &prot, &result_page, &top_page,
116 FALSE, CONTINUE_NULL);
117
118 if (result != VM_FAULT_SUCCESS) {
119 vm_object_deallocate(object);
120
121 switch (result) {
122 case VM_FAULT_RETRY:
123 goto RetryFault;
124 case VM_FAULT_INTERRUPTED:
125 return KERN_SUCCESS;
126 case VM_FAULT_MEMORY_SHORTAGE:
127 VM_PAGE_WAIT(CONTINUE_NULL);
128 goto RetryFault;
129 case VM_FAULT_FICTITIOUS_SHORTAGE:
130 vm_page_more_fictitious();
131 goto RetryFault;
132 case VM_FAULT_MEMORY_ERROR:
133 return KERN_MEMORY_ERROR;
134 }
135 }
136
137 m = result_page;
138
139 /*
140 * How to clean up the result of vm_fault_page. This
141 * happens whether the mapping is entered or not.
142 */
143
144 #define UNLOCK_AND_DEALLOCATE \
145 MACRO_BEGIN \
146 vm_fault_cleanup(m->object, top_page); \
147 vm_object_deallocate(object); \
148 MACRO_END
149
150 /*
151 * What to do with the resulting page from vm_fault_page
152 * if it doesn't get entered into the physical map:
153 */
154
155 #define RELEASE_PAGE(m) \
156 MACRO_BEGIN \
157 PAGE_WAKEUP_DONE(m); \
158 vm_page_lock_queues(); \
159 if (!m->active && !m->inactive) \
160 vm_page_activate(m); \
161 vm_page_unlock_queues(); \
162 MACRO_END
163
164 /*
165 * We must verify that the maps have not changed.
166 */
167 vm_object_unlock(m->object);
168 while (!vm_map_verify(map, &version)) {
169 vm_object_t retry_object;
170 vm_offset_t retry_offset;
171 vm_prot_t retry_prot;
172
173 result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
174 &retry_object, &retry_offset, &retry_prot,
175 &wired);
176 if (result != KERN_SUCCESS) {
177 vm_object_lock(m->object);
178 RELEASE_PAGE(m);
179 UNLOCK_AND_DEALLOCATE;
180 return result;
181 }
182
183 vm_object_unlock(retry_object);
184
185 if (retry_object != object || retry_offset != offset) {
186 vm_object_lock(m->object);
187 RELEASE_PAGE(m);
188 UNLOCK_AND_DEALLOCATE;
189 goto RetryFault;
190 }
191 }
192
193 /*
194 * Put the page in the physical map.
195 */
196 PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ|VM_PROT_WRITE, wired);
197
198 vm_object_lock(m->object);
199 vm_page_lock_queues();
200 if (!m->active && !m->inactive)
201 vm_page_activate(m);
202 m->reference = TRUE;
203 vm_page_unlock_queues();
204
205 vm_map_verify_done(map, &version);
206 PAGE_WAKEUP_DONE(m);
207
208 UNLOCK_AND_DEALLOCATE;
209
210 #undef UNLOCK_AND_DEALLOCATE
211 #undef RELEASE_PAGE
212
213 return KERN_SUCCESS;
214 }
Cache object: 43ab89a6f918e6fd580a00474049d706
|