FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_fault.c
1 /* $NetBSD: uvm_fault.c,v 1.91.2.1 2005/08/24 18:43:38 riz Exp $ */
2
3 /*
4 *
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor and
19 * Washington University.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
35 */
36
37 /*
38 * uvm_fault.c: fault handler
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.91.2.1 2005/08/24 18:43:38 riz Exp $");
43
44 #include "opt_uvmhist.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/proc.h>
50 #include <sys/malloc.h>
51 #include <sys/mman.h>
52 #include <sys/user.h>
53 #include <sys/vnode.h>
54
55 #include <uvm/uvm.h>
56
57 /*
58 *
59 * a word on page faults:
60 *
61 * types of page faults we handle:
62 *
63 * CASE 1: upper layer faults                   CASE 2: lower layer faults
64 *
65 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
66 *    read/write1     write>1                  read/write   +-cow_write/zero
67 *         |             |                         |        |
68 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
69 * amap |  V  |       |  ----------->new|          |        | |  ^  |
70 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
71 *                                                 |        |    |
72 *      +-----+       +-----+                   +--|--+     | +--|--+
73 * uobj | d/c |       | d/c |                   |  V  |     +----|  |
74 *      +-----+       +-----+                   +-----+       +-----+
75 *
76 * d/c = don't care
77 *
78 * case [0]: layerless fault
79 * no amap or uobj is present. this is an error.
80 *
81 * case [1]: upper layer fault [anon active]
82 * 1A: [read] or [write with anon->an_ref == 1]
83 * I/O takes place in top level anon and uobj is not touched.
84 * 1B: [write with anon->an_ref > 1]
85 * new anon is alloc'd and data is copied off ["COW"]
86 *
87 * case [2]: lower layer fault [uobj]
88 * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
89 * I/O takes place directly in object.
90 * 2B: [write to copy_on_write] or [read on NULL uobj]
91 * data is "promoted" from uobj to a new anon.
92 * if uobj is null, then we zero fill.
93 *
94 * we follow the standard UVM locking protocol ordering:
95 *
96 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
97 * we hold a PG_BUSY page if we unlock for I/O
98 *
99 *
100 * the code is structured as follows:
101 *
102 * - init the "IN" params in the ufi structure
103 * ReFault:
104 * - do lookups [locks maps], check protection, handle needs_copy
105 * - check for case 0 fault (error)
106 * - establish "range" of fault
107 * - if we have an amap lock it and extract the anons
108 * - if sequential advice deactivate pages behind us
109 * - at the same time check pmap for unmapped areas and anon for pages
110 * that we could map in (and map them in if found)
111 * - check object for resident pages that we could map in
112 * - if (case 2) goto Case2
113 * - >>> handle case 1
114 * - ensure source anon is resident in RAM
115 * - if case 1B alloc new anon and copy from source
116 * - map the correct page in
117 * Case2:
118 * - >>> handle case 2
119 * - ensure source page is resident (if uobj)
120 * - if case 2B alloc new anon and copy from source (could be zero
121 * fill if uobj == NULL)
122 * - map the correct page in
123 * - done!
124 *
125 * note on paging:
126 * if we have to do I/O we place a PG_BUSY page in the correct object,
127 * unlock everything, and do the I/O. when I/O is done we must reverify
128 * the state of the world before assuming that our data structures are
129 * valid. [because mappings could change while the map is unlocked]
130 *
131 * alternative 1: unbusy the page in question and restart the page fault
132 * from the top (ReFault). this is easy but does not take advantage
133 * of the information that we already have from our previous lookup,
134 * although it is possible that the "hints" in the vm_map will help here.
135 *
136 * alternative 2: the system already keeps track of a "version" number of
137 * a map. [i.e. every time you write-lock a map (e.g. to change a
138 * mapping) you bump the version number up by one...] so, we can save
139 * the version number of the map before we release the lock and start I/O.
140 * then when I/O is done we can relock and check the version numbers
141 * to see if anything changed. this might save us something over
142 * alternative 1: no need to unbusy the page, and possibly fewer compares(?).
143 *
144 * alternative 3: put in backpointers or a way to "hold" part of a map
145 * in place while I/O is in progress. this could be complex to
146 * implement (especially with structures like amap that can be referenced
147 * by multiple map entries, and figuring out what should wait could be
148 * complex as well...).
149 *
150 * given that we are not currently multiprocessor or multithreaded we might
151 * as well choose alternative 2 now. maybe alternative 3 would be useful
152 * in the future. XXX: keep in mind for future consideration/rechecking.
153 */
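
/*
 * editorial sketch (not part of the original source): the error
 * contract a caller of uvm_fault() sees, condensed from the cases
 * documented above and the code below:
 *
 *	error = uvm_fault(map, va, fault_type, access_type);
 *
 *	0       page mapped, fault resolved
 *	EFAULT  no mapping at va, or case [0] (no amap and no uobj)
 *	EACCES  protection failure
 *	ENOMEM  out of anon VM, or swap full
 *	other   I/O error propagated from the pager (pgo_get/pgo_fault)
 *
 * ERESTART never escapes to the caller: a failed relock after I/O is
 * handled internally via uvmfault_relock() and "goto ReFault".
 */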
154
155 /*
156 * local data structures
157 */
158
159 struct uvm_advice {
160 int advice;
161 int nback;
162 int nforw;
163 };
164
165 /*
166 * page range array:
167 * note: index in array must match "advice" value
168 * XXX: borrowed numbers from freebsd. do they work well for us?
169 */
170
171 static struct uvm_advice uvmadvice[] = {
172 { MADV_NORMAL, 3, 4 },
173 { MADV_RANDOM, 0, 0 },
174 { MADV_SEQUENTIAL, 8, 7},
175 };
176
177 #define UVM_MAXRANGE 16 /* must be MAX() of nback+nforw+1 */
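
/*
 * worked example (editorial): with MADV_NORMAL advice (nback=3,
 * nforw=4), a fault maps up to 3 pages behind and 4 pages ahead of
 * the faulting page, clipped to the map entry.  this mirrors the
 * computation done near the top of uvm_fault():
 *
 *	nback  = MIN(uvmadvice[advice].nback,
 *	    (faultaddr - entry->start) >> PAGE_SHIFT);
 *	nforw  = MIN(uvmadvice[advice].nforw,
 *	    ((entry->end - faultaddr) >> PAGE_SHIFT) - 1);
 *	npages = nback + nforw + 1;	(never exceeds UVM_MAXRANGE)
 *
 * the MADV_SEQUENTIAL row is the largest: 8 + 7 + 1 = UVM_MAXRANGE.
 */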
178
179 /*
180 * private prototypes
181 */
182
183 static void uvmfault_amapcopy(struct uvm_faultinfo *);
184 static __inline void uvmfault_anonflush(struct vm_anon **, int);
185
186 /*
187 * inline functions
188 */
189
190 /*
191 * uvmfault_anonflush: try and deactivate pages in specified anons
192 *
193 * => does not have to deactivate page if it is busy
194 */
195
196 static __inline void
197 uvmfault_anonflush(anons, n)
198 struct vm_anon **anons;
199 int n;
200 {
201 int lcv;
202 struct vm_page *pg;
203
204 for (lcv = 0 ; lcv < n ; lcv++) {
205 if (anons[lcv] == NULL)
206 continue;
207 simple_lock(&anons[lcv]->an_lock);
208 pg = anons[lcv]->u.an_page;
209 if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) {
210 uvm_lock_pageq();
211 if (pg->wire_count == 0) {
212 pmap_clear_reference(pg);
213 uvm_pagedeactivate(pg);
214 }
215 uvm_unlock_pageq();
216 }
217 simple_unlock(&anons[lcv]->an_lock);
218 }
219 }
220
221 /*
222 * normal functions
223 */
224
225 /*
226 * uvmfault_amapcopy: clear "needs_copy" in a map.
227 *
228 * => called with VM data structures unlocked (usually, see below)
229 * => we get a write lock on the maps and clear needs_copy for a VA
230 * => if we are out of RAM we sleep (waiting for more)
231 */
232
233 static void
234 uvmfault_amapcopy(ufi)
235 struct uvm_faultinfo *ufi;
236 {
237 for (;;) {
238
239 /*
240 * no mapping? give up.
241 */
242
243 if (uvmfault_lookup(ufi, TRUE) == FALSE)
244 return;
245
246 /*
247 * copy if needed.
248 */
249
250 if (UVM_ET_ISNEEDSCOPY(ufi->entry))
251 amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
252 ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
253
254 /*
255 * didn't work? must be out of RAM. unlock and sleep.
256 */
257
258 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
259 uvmfault_unlockmaps(ufi, TRUE);
260 uvm_wait("fltamapcopy");
261 continue;
262 }
263
264 /*
265 * got it! unlock and return.
266 */
267
268 uvmfault_unlockmaps(ufi, TRUE);
269 return;
270 }
271 /*NOTREACHED*/
272 }
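
/*
 * usage note (editorial): uvm_fault() invokes this with the maps
 * already unlocked when it must write to a needs_copy entry:
 *
 *	uvmfault_unlockmaps(&ufi, FALSE);
 *	uvmfault_amapcopy(&ufi);
 *	uvmexp.fltamcopy++;
 *	goto ReFault;
 */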
273
274 /*
275 * uvmfault_anonget: get data in an anon into a non-busy, non-released
276 * page in that anon.
277 *
278 * => maps, amap, and anon locked by caller.
279 * => if we fail (result != 0) we unlock everything.
280 * => if we are successful, we return with everything still locked.
281 * => we don't move the page on the queues [gets moved later]
282 * => if we allocate a new page [we_own], it gets put on the queues.
283 * either way, the result is that the page is on the queues at return time
284 * => for pages which are on loan from a uvm_object (and thus are not
285 * owned by the anon): if successful, we return with the owning object
286 * locked. the caller must unlock this object when it unlocks everything
287 * else.
288 */
289
290 int
291 uvmfault_anonget(ufi, amap, anon)
292 struct uvm_faultinfo *ufi;
293 struct vm_amap *amap;
294 struct vm_anon *anon;
295 {
296 boolean_t we_own; /* we own anon's page? */
297 boolean_t locked; /* did we relock? */
298 struct vm_page *pg;
299 int error;
300 UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
301
302 LOCK_ASSERT(simple_lock_held(&anon->an_lock));
303
304 error = 0;
305 uvmexp.fltanget++;
306 /* bump rusage counters */
307 if (anon->u.an_page)
308 curproc->p_stats->p_ru.ru_minflt++;
309 else
310 curproc->p_stats->p_ru.ru_majflt++;
311
312 /*
313 * loop until we get it, or fail.
314 */
315
316 for (;;) {
317 we_own = FALSE; /* TRUE if we set PG_BUSY on a page */
318 pg = anon->u.an_page;
319
320 /*
321 * if there is a resident page and it is loaned, then anon
322 * may not own it. call out to uvm_anon_lockloanpg() to ensure
323 * the real owner of the page has been identified and locked.
324 */
325
326 if (pg && pg->loan_count)
327 pg = uvm_anon_lockloanpg(anon);
328
329 /*
330 * page there? make sure it is not busy/released.
331 */
332
333 if (pg) {
334
335 /*
336 * at this point, if the page has a uobject [meaning
337 * we have it on loan], then that uobject is locked
338 * by us! if the page is busy, we drop all the
339 * locks (including uobject) and try again.
340 */
341
342 if ((pg->flags & PG_BUSY) == 0) {
343 UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
344 return (0);
345 }
346 pg->flags |= PG_WANTED;
347 uvmexp.fltpgwait++;
348
349 /*
350 * the last unlock must be an atomic unlock+wait on
351 * the owner of the page
352 */
353
354 if (pg->uobject) { /* owner is uobject ? */
355 uvmfault_unlockall(ufi, amap, NULL, anon);
356 UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
357 0,0,0);
358 UVM_UNLOCK_AND_WAIT(pg,
359 &pg->uobject->vmobjlock,
360 FALSE, "anonget1",0);
361 } else {
362 /* anon owns page */
363 uvmfault_unlockall(ufi, amap, NULL, NULL);
364 UVMHIST_LOG(maphist, " unlock+wait on anon",0,
365 0,0,0);
366 UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
367 "anonget2",0);
368 }
369 } else {
370
371 /*
372 * no page, we must try and bring it in.
373 */
374
375 pg = uvm_pagealloc(NULL, 0, anon, 0);
376 if (pg == NULL) { /* out of RAM. */
377 uvmfault_unlockall(ufi, amap, NULL, anon);
378 uvmexp.fltnoram++;
379 UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
380 0,0,0);
381 uvm_wait("flt_noram1");
382 } else {
383 /* we set the PG_BUSY bit */
384 we_own = TRUE;
385 uvmfault_unlockall(ufi, amap, NULL, anon);
386
387 /*
388 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
389 * page into the uvm_swap_get function with
390 * all data structures unlocked. note that
391 * it is ok to read an_swslot here because
392 * we hold PG_BUSY on the page.
393 */
394 uvmexp.pageins++;
395 error = uvm_swap_get(pg, anon->an_swslot,
396 PGO_SYNCIO);
397
398 /*
399 * we clean up after the i/o below in the
400 * "we_own" case
401 */
402 }
403 }
404
405 /*
406 * now relock and try again
407 */
408
409 locked = uvmfault_relock(ufi);
410 if (locked && amap != NULL) {
411 amap_lock(amap);
412 }
413 if (locked || we_own)
414 simple_lock(&anon->an_lock);
415
416 /*
417 * if we own the page (i.e. we set PG_BUSY), then we need
418 * to clean up after the I/O. there are three cases to
419 * consider:
420 * [1] page released during I/O: free anon and ReFault.
421 * [2] I/O not OK. free the page and cause the fault
422 * to fail.
423 * [3] I/O OK! activate the page and sync with the
424 * non-we_own case (i.e. drop anon lock if not locked).
425 */
426
427 if (we_own) {
428 if (pg->flags & PG_WANTED) {
429 wakeup(pg);
430 }
431 if (error) {
432
433 /*
434 * remove the swap slot from the anon
435 * and mark the anon as having no real slot.
436 * don't free the swap slot, thus preventing
437 * it from being used again.
438 */
439
440 if (anon->an_swslot > 0)
441 uvm_swap_markbad(anon->an_swslot, 1);
442 anon->an_swslot = SWSLOT_BAD;
443
444 if ((pg->flags & PG_RELEASED) != 0)
445 goto released;
446
447 /*
448 * note: page was never !PG_BUSY, so it
449 * can't be mapped and thus no need to
450 * pmap_page_protect it...
451 */
452
453 uvm_lock_pageq();
454 uvm_pagefree(pg);
455 uvm_unlock_pageq();
456
457 if (locked)
458 uvmfault_unlockall(ufi, amap, NULL,
459 anon);
460 else
461 simple_unlock(&anon->an_lock);
462 UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
463 return error;
464 }
465
466 if ((pg->flags & PG_RELEASED) != 0) {
467 released:
468 KASSERT(anon->an_ref == 0);
469
470 /*
471 * released while we unlocked amap.
472 */
473
474 if (locked)
475 uvmfault_unlockall(ufi, amap, NULL,
476 NULL);
477
478 uvm_anon_release(anon);
479
480 if (error) {
481 UVMHIST_LOG(maphist,
482 "<- ERROR/RELEASED", 0,0,0,0);
483 return error;
484 }
485
486 UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
487 return ERESTART;
488 }
489
490 /*
491 * we've successfully read the page, activate it.
492 */
493
494 uvm_lock_pageq();
495 uvm_pageactivate(pg);
496 uvm_unlock_pageq();
497 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
498 UVM_PAGE_OWN(pg, NULL);
499 if (!locked)
500 simple_unlock(&anon->an_lock);
501 }
502
503 /*
504 * we were not able to relock. restart fault.
505 */
506
507 if (!locked) {
508 UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
509 return (ERESTART);
510 }
511
512 /*
513 * verify no one has touched the amap and moved the anon on us.
514 */
515
516 if (ufi != NULL &&
517 amap_lookup(&ufi->entry->aref,
518 ufi->orig_rvaddr - ufi->entry->start) != anon) {
519
520 uvmfault_unlockall(ufi, amap, NULL, anon);
521 UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
522 return (ERESTART);
523 }
524
525 /*
526 * try it again!
527 */
528
529 uvmexp.fltanretry++;
530 continue;
531 }
532 /*NOTREACHED*/
533 }
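
/*
 * usage note (editorial): the case 1 code in uvm_fault() below
 * dispatches on the return value of uvmfault_anonget() like this:
 *
 *	error = uvmfault_anonget(&ufi, amap, anon);
 *	switch (error) {
 *	case 0:				(everything still locked)
 *		break;
 *	case ERESTART:			(everything was unlocked)
 *		goto ReFault;
 *	case EAGAIN:
 *		tsleep(&lbolt, PVM, "fltagain1", 0);
 *		goto ReFault;
 *	default:			(fatal I/O error, unlocked)
 *		return error;
 *	}
 */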
534
535 /*
536 * F A U L T - m a i n e n t r y p o i n t
537 */
538
539 /*
540 * uvm_fault: page fault handler
541 *
542 * => called from MD code to resolve a page fault
543 * => VM data structures usually should be unlocked. however, it is
544 * possible to call here with the main map locked if the caller
545 * gets a write lock, sets it recursive, and then calls us (cf.
546 * uvm_map_pageable). this should be avoided because it keeps
547 * the map locked during I/O.
548 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
549 */
550
551 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
552 ~VM_PROT_WRITE : VM_PROT_ALL)
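
/*
 * MASK(entry) strips VM_PROT_WRITE for copy-on-write entries so that
 * a lower-layer (uobj) page is never entered writable before it has
 * been promoted to an anon.
 *
 * hedged usage sketch (editorial; the fault_type value and signal
 * policy are MD assumptions, not taken from this file): machine-
 * dependent trap code typically resolves a fault roughly as
 *
 *	map = user_fault ? &p->p_vmspace->vm_map : kernel_map;
 *	error = uvm_fault(map, trunc_page(va), VM_FAULT_INVALID, ftype);
 *	if (error)
 *		deliver SIGSEGV/SIGBUS, or panic on a kernel fault
 *
 * where "ftype" (VM_PROT_READ and/or VM_PROT_WRITE) is derived from
 * the trap's access bits.
 */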
553
554 int
555 uvm_fault(orig_map, vaddr, fault_type, access_type)
556 struct vm_map *orig_map;
557 vaddr_t vaddr;
558 vm_fault_t fault_type;
559 vm_prot_t access_type;
560 {
561 struct uvm_faultinfo ufi;
562 vm_prot_t enter_prot, check_prot;
563 boolean_t wired, narrow, promote, locked, shadowed, wire_fault, cow_now;
564 int npages, nback, nforw, centeridx, error, lcv, gotpages;
565 vaddr_t startva, currva;
566 voff_t uoff;
567 struct vm_amap *amap;
568 struct uvm_object *uobj;
569 struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
570 struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
571 UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
572
573 UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, ft=%d, at=%d)",
574 orig_map, vaddr, fault_type, access_type);
575
576 anon = NULL;
577 pg = NULL;
578
579 uvmexp.faults++; /* XXX: locking? */
580
581 /*
582 * init the IN parameters in the ufi
583 */
584
585 ufi.orig_map = orig_map;
586 ufi.orig_rvaddr = trunc_page(vaddr);
587 ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
588 wire_fault = fault_type == VM_FAULT_WIRE ||
589 fault_type == VM_FAULT_WIREMAX;
590 if (wire_fault)
591 narrow = TRUE; /* don't look for neighborhood
592 * pages on wire */
593 else
594 narrow = FALSE; /* normal fault */
595
596 /*
597 * "goto ReFault" means restart the page fault from ground zero.
598 */
599 ReFault:
600
601 /*
602 * lookup and lock the maps
603 */
604
605 if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
606 UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
607 return (EFAULT);
608 }
609 /* locked: maps(read) */
610
611 #ifdef DIAGNOSTIC
612 if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
613 printf("Page fault on non-pageable map:\n");
614 printf("ufi.map = %p\n", ufi.map);
615 printf("ufi.orig_map = %p\n", ufi.orig_map);
616 printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
617 panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
618 }
619 #endif
620
621 /*
622 * check protection
623 */
624
625 check_prot = fault_type == VM_FAULT_WIREMAX ?
626 ufi.entry->max_protection : ufi.entry->protection;
627 if ((check_prot & access_type) != access_type) {
628 UVMHIST_LOG(maphist,
629 "<- protection failure (prot=0x%x, access=0x%x)",
630 ufi.entry->protection, access_type, 0, 0);
631 uvmfault_unlockmaps(&ufi, FALSE);
632 return EACCES;
633 }
634
635 /*
636 * "enter_prot" is the protection we want to enter the page in at.
637 * for certain pages (e.g. copy-on-write pages) this protection can
638 * be more strict than ufi.entry->protection. "wired" means either
639 * the entry is wired or we are fault-wiring the pg.
640 */
641
642 enter_prot = ufi.entry->protection;
643 wired = VM_MAPENT_ISWIRED(ufi.entry) || wire_fault;
644 if (wired) {
645 access_type = enter_prot; /* full access for wired */
646 cow_now = (check_prot & VM_PROT_WRITE) != 0;
647 } else {
648 cow_now = (access_type & VM_PROT_WRITE) != 0;
649 }
650
651 /*
652 * handle "needs_copy" case. if we need to copy the amap we will
653 * have to drop our readlock and relock it with a write lock. (we
654 * need a write lock to change anything in a map entry [e.g.
655 * needs_copy]).
656 */
657
658 if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
659 KASSERT(fault_type != VM_FAULT_WIREMAX);
660 if (cow_now || (ufi.entry->object.uvm_obj == NULL)) {
661 /* need to clear */
662 UVMHIST_LOG(maphist,
663 " need to clear needs_copy and refault",0,0,0,0);
664 uvmfault_unlockmaps(&ufi, FALSE);
665 uvmfault_amapcopy(&ufi);
666 uvmexp.fltamcopy++;
667 goto ReFault;
668
669 } else {
670
671 /*
672 * ensure that we pmap_enter page R/O since
673 * needs_copy is still true
674 */
675
676 enter_prot &= ~VM_PROT_WRITE;
677 }
678 }
679
680 /*
681 * identify the players
682 */
683
684 amap = ufi.entry->aref.ar_amap; /* top layer */
685 uobj = ufi.entry->object.uvm_obj; /* bottom layer */
686
687 /*
688 * check for a case 0 fault. if nothing backing the entry then
689 * error now.
690 */
691
692 if (amap == NULL && uobj == NULL) {
693 uvmfault_unlockmaps(&ufi, FALSE);
694 UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
695 return (EFAULT);
696 }
697
698 /*
699 * establish range of interest based on advice from mapper
700 * and then clip to fit map entry. note that we only want
701 * to do this the first time through the fault. if we
702 * ReFault we will disable this by setting "narrow" to true.
703 */
704
705 if (narrow == FALSE) {
706
707 /* wide fault (!narrow) */
708 KASSERT(uvmadvice[ufi.entry->advice].advice ==
709 ufi.entry->advice);
710 nback = MIN(uvmadvice[ufi.entry->advice].nback,
711 (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
712 startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
713 nforw = MIN(uvmadvice[ufi.entry->advice].nforw,
714 ((ufi.entry->end - ufi.orig_rvaddr) >>
715 PAGE_SHIFT) - 1);
716 /*
717 * note: "-1" because we don't want to count the
718 * faulting page as forw
719 */
720 npages = nback + nforw + 1;
721 centeridx = nback;
722
723 narrow = TRUE; /* ensure only once per-fault */
724
725 } else {
726
727 /* narrow fault! */
728 nback = nforw = 0;
729 startva = ufi.orig_rvaddr;
730 npages = 1;
731 centeridx = 0;
732
733 }
734
735 /* locked: maps(read) */
736 UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=0x%x",
737 narrow, nback, nforw, startva);
738 UVMHIST_LOG(maphist, " entry=0x%x, amap=0x%x, obj=0x%x", ufi.entry,
739 amap, uobj, 0);
740
741 /*
742 * if we've got an amap, lock it and extract current anons.
743 */
744
745 if (amap) {
746 amap_lock(amap);
747 anons = anons_store;
748 amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
749 anons, npages);
750 } else {
751 anons = NULL; /* to be safe */
752 }
753
754 /* locked: maps(read), amap(if there) */
755
756 /*
757 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
758 * now and then forget about them (for the rest of the fault).
759 */
760
761 if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
762
763 UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
764 0,0,0,0);
765 /* flush back-page anons? */
766 if (amap)
767 uvmfault_anonflush(anons, nback);
768
769 /* flush object? */
770 if (uobj) {
771 uoff = (startva - ufi.entry->start) + ufi.entry->offset;
772 simple_lock(&uobj->vmobjlock);
773 (void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
774 (nback << PAGE_SHIFT), PGO_DEACTIVATE);
775 }
776
777 /* now forget about the backpages */
778 if (amap)
779 anons += nback;
780 startva += (nback << PAGE_SHIFT);
781 npages -= nback;
782 nback = centeridx = 0;
783 }
784
785 /* locked: maps(read), amap(if there) */
786
787 /*
788 * map in the backpages and frontpages we found in the amap in hopes
789 * of preventing future faults. we also init the pages[] array as
790 * we go.
791 */
792
793 currva = startva;
794 shadowed = FALSE;
795 for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
796
797 /*
798 * don't play with VAs that are already mapped
799 * (except for the center page)
800 */
801 if (lcv != centeridx &&
802 pmap_extract(ufi.orig_map->pmap, currva, NULL)) {
803 pages[lcv] = PGO_DONTCARE;
804 continue;
805 }
806
807 /*
808 * unmapped or center page. check if any anon at this level.
809 */
810 if (amap == NULL || anons[lcv] == NULL) {
811 pages[lcv] = NULL;
812 continue;
813 }
814
815 /*
816 * check for present page and map if possible. re-activate it.
817 */
818
819 pages[lcv] = PGO_DONTCARE;
820 if (lcv == centeridx) { /* save center for later! */
821 shadowed = TRUE;
822 continue;
823 }
824 anon = anons[lcv];
825 simple_lock(&anon->an_lock);
826 /* ignore loaned pages */
827 if (anon->u.an_page && anon->u.an_page->loan_count == 0 &&
828 (anon->u.an_page->flags & PG_BUSY) == 0) {
829 uvm_lock_pageq();
830 uvm_pageactivate(anon->u.an_page);
831 uvm_unlock_pageq();
832 UVMHIST_LOG(maphist,
833 " MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
834 ufi.orig_map->pmap, currva, anon->u.an_page, 0);
835 uvmexp.fltnamap++;
836
837 /*
838 * Since this isn't the page that's actually faulting,
839 * ignore pmap_enter() failures; it's not critical
840 * that we enter these right now.
841 */
842
843 (void) pmap_enter(ufi.orig_map->pmap, currva,
844 VM_PAGE_TO_PHYS(anon->u.an_page),
845 (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
846 enter_prot,
847 PMAP_CANFAIL |
848 (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
849 }
850 simple_unlock(&anon->an_lock);
851 pmap_update(ufi.orig_map->pmap);
852 }
853
854 /* locked: maps(read), amap(if there) */
855 /* (shadowed == TRUE) if there is an anon at the faulting address */
856 UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
857 (uobj && shadowed == FALSE),0,0);
858
859 /*
860 * note that if we are really short of RAM we could sleep in the above
861 * call to pmap_enter with everything locked. bad?
862 *
863 * XXX Actually, that is bad; pmap_enter() should just fail in that
864 * XXX case. --thorpej
865 */
866
867 /*
868 * if the desired page is not shadowed by the amap and we have a
869 * backing object, then we check to see if the backing object would
870 * prefer to handle the fault itself (rather than letting us do it
871 * with the usual pgo_get hook). the backing object signals this by
872 * providing a pgo_fault routine.
873 */
874
875 if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
876 simple_lock(&uobj->vmobjlock);
877
878 /* locked: maps(read), amap (if there), uobj */
879 error = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
880 centeridx, fault_type, access_type, PGO_LOCKED|PGO_SYNCIO);
881
882 /* locked: nothing, pgo_fault has unlocked everything */
883
884 if (error == ERESTART)
885 goto ReFault; /* try again! */
886 /*
887 * object fault routine responsible for pmap_update().
888 */
889 return error;
890 }
891
892 /*
893 * now, if the desired page is not shadowed by the amap and we have
894 * a backing object that does not have a special fault routine, then
895 * we ask (with pgo_get) the object for resident pages that we care
896 * about and attempt to map them in. we do not let pgo_get block
897 * (PGO_LOCKED).
898 */
899
900 if (uobj && shadowed == FALSE) {
901 simple_lock(&uobj->vmobjlock);
902
903 /* locked (!shadowed): maps(read), amap (if there), uobj */
904 /*
905 * the following call to pgo_get does _not_ change locking state
906 */
907
908 uvmexp.fltlget++;
909 gotpages = npages;
910 (void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
911 (startva - ufi.entry->start),
912 pages, &gotpages, centeridx,
913 access_type & MASK(ufi.entry),
914 ufi.entry->advice, PGO_LOCKED);
915
916 /*
917 * check for pages to map, if we got any
918 */
919
920 uobjpage = NULL;
921
922 if (gotpages) {
923 currva = startva;
924 for (lcv = 0; lcv < npages;
925 lcv++, currva += PAGE_SIZE) {
926 struct vm_page *curpg;
927 boolean_t readonly;
928
929 curpg = pages[lcv];
930 if (curpg == NULL || curpg == PGO_DONTCARE) {
931 continue;
932 }
933
934 /*
935 * if center page is resident and not
936 * PG_BUSY|PG_RELEASED then pgo_get
937 * made it PG_BUSY for us and gave
938 * us a handle to it. remember this
939 * page as "uobjpage." (for later use).
940 */
941
942 if (lcv == centeridx) {
943 uobjpage = curpg;
944 UVMHIST_LOG(maphist, " got uobjpage "
945 "(0x%x) with locked get",
946 uobjpage, 0,0,0);
947 continue;
948 }
949
950 /*
951 * calling pgo_get with PGO_LOCKED returns us
952 * pages which are neither busy nor released,
953 * so we don't need to check for this.
954 * we can just directly enter the pages.
955 */
956
957 uvm_lock_pageq();
958 uvm_pageactivate(curpg);
959 uvm_unlock_pageq();
960 UVMHIST_LOG(maphist,
961 " MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
962 ufi.orig_map->pmap, currva, curpg, 0);
963 uvmexp.fltnomap++;
964
965 /*
966 * Since this page isn't the page that's
967 * actually faulting, ignore pmap_enter()
968 * failures; it's not critical that we
969 * enter these right now.
970 */
971 KASSERT((curpg->flags & PG_PAGEOUT) == 0);
972 KASSERT((curpg->flags & PG_RELEASED) == 0);
973 KASSERT(!UVM_OBJ_IS_CLEAN(curpg->uobject) ||
974 (curpg->flags & PG_CLEAN) != 0);
975 readonly = (curpg->flags & PG_RDONLY)
976 || (curpg->loan_count > 0)
977 || UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);
978
979 (void) pmap_enter(ufi.orig_map->pmap, currva,
980 VM_PAGE_TO_PHYS(curpg),
981 readonly ?
982 enter_prot & ~VM_PROT_WRITE :
983 enter_prot & MASK(ufi.entry),
984 PMAP_CANFAIL |
985 (wired ? PMAP_WIRED : 0));
986
987 /*
988 * NOTE: page can't be PG_WANTED or PG_RELEASED
989 * because we've held the lock the whole time
990 * we've had the handle.
991 */
992
993 curpg->flags &= ~(PG_BUSY);
994 UVM_PAGE_OWN(curpg, NULL);
995 }
996 pmap_update(ufi.orig_map->pmap);
997 }
998 } else {
999 uobjpage = NULL;
1000 }
1001
1002 /* locked (shadowed): maps(read), amap */
1003 /* locked (!shadowed): maps(read), amap(if there),
1004 uobj(if !null), uobjpage(if !null) */
1005
1006 /*
1007 * note that at this point we are done with any front or back pages.
1008 * we are now going to focus on the center page (i.e. the one we've
1009 * faulted on). if we have faulted on the top (anon) layer
1010 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
1011 * not touched it yet). if we have faulted on the bottom (uobj)
1012 * layer [i.e. case 2] and the page was both present and available,
1013 * then we've got a pointer to it as "uobjpage" and we've already
1014 * made it BUSY.
1015 */
1016
1017 /*
1018 * there are four possible cases we must address: 1A, 1B, 2A, and 2B
1019 */
1020
1021 /*
1022 * redirect case 2: if we are not shadowed, go to case 2.
1023 */
1024
1025 if (shadowed == FALSE)
1026 goto Case2;
1027
1028 /* locked: maps(read), amap */
1029
1030 /*
1031 * handle case 1: fault on an anon in our amap
1032 */
1033
1034 anon = anons[centeridx];
1035 UVMHIST_LOG(maphist, " case 1 fault: anon=0x%x", anon, 0,0,0);
1036 simple_lock(&anon->an_lock);
1037
1038 /* locked: maps(read), amap, anon */
1039
1040 /*
1041 * no matter if we have case 1A or case 1B we are going to need to
1042 * have the anon's memory resident. ensure that now.
1043 */
1044
1045 /*
1046 * let uvmfault_anonget do the dirty work.
1047 * if it fails (!OK) it will unlock everything for us.
1048 * if it succeeds, locks are still valid and locked.
1049 * also, if it is OK, then the anon's page is on the queues.
1050 * if the page is on loan from a uvm_object, then anonget will
1051 * lock that object for us if it does not fail.
1052 */
1053
1054 error = uvmfault_anonget(&ufi, amap, anon);
1055 switch (error) {
1056 case 0:
1057 break;
1058
1059 case ERESTART:
1060 goto ReFault;
1061
1062 case EAGAIN:
1063 tsleep(&lbolt, PVM, "fltagain1", 0);
1064 goto ReFault;
1065
1066 default:
1067 return error;
1068 }
1069
1070 /*
1071 * uobj is non null if the page is on loan from an object (i.e. uobj)
1072 */
1073
1074 uobj = anon->u.an_page->uobject; /* locked by anonget if !NULL */
1075
1076 /* locked: maps(read), amap, anon, uobj(if one) */
1077
1078 /*
1079 * special handling for loaned pages
1080 */
1081
1082 if (anon->u.an_page->loan_count) {
1083
1084 if (!cow_now) {
1085
1086 /*
1087 * for read faults on loaned pages we just cap the
1088 * protection at read-only.
1089 */
1090
1091 enter_prot = enter_prot & ~VM_PROT_WRITE;
1092
1093 } else {
1094 /*
1095 * note that we can't allow writes into a loaned page!
1096 *
1097 * if we have a write fault on a loaned page in an
1098 * anon then we need to look at the anon's ref count.
1099 * if it is greater than one then we are going to do
1100 * a normal copy-on-write fault into a new anon (this
1101 * is not a problem). however, if the reference count
1102 * is one (a case where we would normally allow a
1103 * write directly to the page) then we need to kill
1104 * the loan before we continue.
1105 */
1106
1107 /* >1 case is already ok */
1108 if (anon->an_ref == 1) {
1109
1110 /* get new un-owned replacement page */
1111 pg = uvm_pagealloc(NULL, 0, NULL, 0);
1112 if (pg == NULL) {
1113 uvmfault_unlockall(&ufi, amap, uobj,
1114 anon);
1115 uvm_wait("flt_noram2");
1116 goto ReFault;
1117 }
1118
1119 /*
1120 * copy data, kill loan, and drop uobj lock
1121 * (if any)
1122 */
1123 /* copy old -> new */
1124 uvm_pagecopy(anon->u.an_page, pg);
1125
1126 /* force reload */
1127 pmap_page_protect(anon->u.an_page,
1128 VM_PROT_NONE);
1129 uvm_lock_pageq(); /* KILL loan */
1130
1131 anon->u.an_page->uanon = NULL;
1132 /* in case we owned */
1133 anon->u.an_page->pqflags &= ~PQ_ANON;
1134
1135 if (uobj) {
1136 /* if we were receiver of loan */
1137 anon->u.an_page->loan_count--;
1138 } else {
1139 /*
1140 * we were the lender (A->K); need
1141 * to remove the page from pageq's.
1142 */
1143 uvm_pagedequeue(anon->u.an_page);
1144 }
1145
1146 uvm_pageactivate(pg);
1147 uvm_unlock_pageq();
1148 if (uobj) {
1149 simple_unlock(&uobj->vmobjlock);
1150 uobj = NULL;
1151 }
1152
1153 /* install new page in anon */
1154 anon->u.an_page = pg;
1155 pg->uanon = anon;
1156 pg->pqflags |= PQ_ANON;
1157 pg->flags &= ~(PG_BUSY|PG_FAKE);
1158 UVM_PAGE_OWN(pg, NULL);
1159
1160 /* done! */
1161 } /* ref == 1 */
1162 } /* write fault */
1163 } /* loan count */
1164
1165 /*
1166 * if we are case 1B then we will need to allocate a new blank
1167 * anon to transfer the data into. note that we have a lock
1168 * on anon, so no one can busy or release the page until we are done.
1169 * also note that the ref count can't drop to zero here because
1170 * it is > 1 and we are only dropping one ref.
1171 *
1172 * in the (hopefully very rare) case that we are out of RAM we
1173 * will unlock, wait for more RAM, and refault.
1174 *
1175 * if we are out of anon VM we kill the process (XXX: could wait?).
1176 */
1177
1178 if (cow_now && anon->an_ref > 1) {
1179
1180 UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
1181 uvmexp.flt_acow++;
1182 oanon = anon; /* oanon = old, locked anon */
1183 anon = uvm_analloc();
1184 if (anon) {
1185 /* new anon is locked! */
1186 pg = uvm_pagealloc(NULL, 0, anon, 0);
1187 }
1188
1189 /* check for out of RAM */
1190 if (anon == NULL || pg == NULL) {
1191 if (anon) {
1192 anon->an_ref--;
1193 simple_unlock(&anon->an_lock);
1194 uvm_anfree(anon);
1195 }
1196 uvmfault_unlockall(&ufi, amap, uobj, oanon);
1197 if (anon == NULL || uvm_swapisfull()) {
1198 UVMHIST_LOG(maphist,
1199 "<- failed. out of VM",0,0,0,0);
1200 uvmexp.fltnoanon++;
1201 return ENOMEM;
1202 }
1203
1204 uvmexp.fltnoram++;
1205 uvm_wait("flt_noram3"); /* out of RAM, wait for more */
1206 goto ReFault;
1207 }
1208
1209 /* got all resources, replace anon with nanon */
1210 uvm_pagecopy(oanon->u.an_page, pg);
1211 uvm_lock_pageq();
1212 uvm_pageactivate(pg);
1213 pg->flags &= ~(PG_BUSY|PG_FAKE);
1214 uvm_unlock_pageq();
1215 UVM_PAGE_OWN(pg, NULL);
1216 amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
1217 anon, TRUE);
1218
1219 /* deref: can not drop to zero here by defn! */
1220 oanon->an_ref--;
1221
1222 /*
1223 * note: oanon is still locked, as is the new anon. we
1224 * need to check for this later when we unlock oanon; if
1225 * oanon != anon, we'll have to unlock anon, too.
1226 */
1227
1228 } else {
1229
1230 uvmexp.flt_anon++;
1231 oanon = anon; /* old, locked anon is same as anon */
1232 pg = anon->u.an_page;
1233 if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
1234 enter_prot = enter_prot & ~VM_PROT_WRITE;
1235
1236 }
1237
1238 /* locked: maps(read), amap, oanon, anon (if different from oanon) */
1239
1240 /*
1241 * now map the page in.
1242 */
1243
1244 UVMHIST_LOG(maphist, " MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x",
1245 ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
1246 if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1247 enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
1248 != 0) {
1249
1250 /*
1251 * No need to undo what we did; we can simply think of
1252 * this as the pmap throwing away the mapping information.
1253 *
1254 * We do, however, have to go through the ReFault path,
1255 * as the map may change while we're asleep.
1256 */
1257
1258 if (anon != oanon)
1259 simple_unlock(&anon->an_lock);
1260 uvmfault_unlockall(&ufi, amap, uobj, oanon);
1261 if (uvm_swapisfull()) {
1262 UVMHIST_LOG(maphist,
1263 "<- failed. out of VM",0,0,0,0);
1264 /* XXX instrumentation */
1265 return ENOMEM;
1266 }
1267 /* XXX instrumentation */
1268 uvm_wait("flt_pmfail1");
1269 goto ReFault;
1270 }
1271
1272 /*
1273 * ... update the page queues.
1274 */
1275
1276 uvm_lock_pageq();
1277 if (wire_fault) {
1278 uvm_pagewire(pg);
1279
1280 /*
1281 * since the now-wired page cannot be paged out,
1282 * release its swap resources for others to use.
1283 * since an anon with no swap cannot be PG_CLEAN,
1284 * clear its clean flag now.
1285 */
1286
1287 pg->flags &= ~(PG_CLEAN);
1288 uvm_anon_dropswap(anon);
1289 } else {
1290 uvm_pageactivate(pg);
1291 }
1292 uvm_unlock_pageq();
1293
1294 /*
1295 * done case 1! finish up by unlocking everything and returning success
1296 */
1297
1298 if (anon != oanon)
1299 simple_unlock(&anon->an_lock);
1300 uvmfault_unlockall(&ufi, amap, uobj, oanon);
1301 pmap_update(ufi.orig_map->pmap);
1302 return 0;
1303
1304 Case2:
1305 /*
1306 * handle case 2: faulting on backing object or zero fill
1307 */
1308
1309 /*
1310 * locked:
1311 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1312 */
1313
1314 /*
1315 * note that uobjpage can not be PGO_DONTCARE at this point. we now
1316 * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
1317 * have a backing object, check and see if we are going to promote
1318 * the data up to an anon during the fault.
1319 */
1320
1321 if (uobj == NULL) {
1322 uobjpage = PGO_DONTCARE;
1323 promote = TRUE; /* always need anon here */
1324 } else {
1325 KASSERT(uobjpage != PGO_DONTCARE);
1326 promote = cow_now && UVM_ET_ISCOPYONWRITE(ufi.entry);
1327 }
1328 UVMHIST_LOG(maphist, " case 2 fault: promote=%d, zfill=%d",
1329 promote, (uobj == NULL), 0,0);
1330
1331 /*
1332 * if uobjpage is not null then we do not need to do I/O to get the
1333 * uobjpage.
1334 *
1335 * if uobjpage is null, then we need to unlock and ask the pager to
1336 * get the data for us. once we have the data, we need to reverify
1337 * the state of the world. we are currently not holding any resources.
1338 */
1339
1340 if (uobjpage) {
1341 /* update rusage counters */
1342 curproc->p_stats->p_ru.ru_minflt++;
1343 } else {
1344 /* update rusage counters */
1345 curproc->p_stats->p_ru.ru_majflt++;
1346
1347 /* locked: maps(read), amap(if there), uobj */
1348 uvmfault_unlockall(&ufi, amap, NULL, NULL);
1349 /* locked: uobj */
1350
1351 uvmexp.fltget++;
1352 gotpages = 1;
1353 uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
1354 error = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
1355 0, access_type & MASK(ufi.entry), ufi.entry->advice,
1356 PGO_SYNCIO);
1357 /* locked: uobjpage(if no error) */
1358
1359 /*
1360 * recover from I/O
1361 */
1362
1363 if (error) {
1364 if (error == EAGAIN) {
1365 UVMHIST_LOG(maphist,
1366 " pgo_get says TRY AGAIN!",0,0,0,0);
1367 tsleep(&lbolt, PVM, "fltagain2", 0);
1368 goto ReFault;
1369 }
1370
1371 UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
1372 error, 0,0,0);
1373 return error;
1374 }
1375
1376 /* locked: uobjpage */
1377
1378 uvm_lock_pageq();
1379 uvm_pageactivate(uobjpage);
1380 uvm_unlock_pageq();
1381
1382 /*
1383 * re-verify the state of the world by first trying to relock
1384 * the maps. always relock the object.
1385 */
1386
1387 locked = uvmfault_relock(&ufi);
1388 if (locked && amap)
1389 amap_lock(amap);
1390 simple_lock(&uobj->vmobjlock);
1391
1392 /* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
1393 /* locked(!locked): uobj, uobjpage */
1394
1395 /*
1396 * verify that the page has not been released and re-verify
1397 * that amap slot is still free. if there is a problem,
1398 * we unlock and clean up.
1399 */
1400
1401 if ((uobjpage->flags & PG_RELEASED) != 0 ||
1402 (locked && amap &&
1403 amap_lookup(&ufi.entry->aref,
1404 ufi.orig_rvaddr - ufi.entry->start))) {
1405 if (locked)
1406 uvmfault_unlockall(&ufi, amap, NULL, NULL);
1407 locked = FALSE;
1408 }
1409
1410 /*
1411 * didn't get the lock? release the page and retry.
1412 */
1413
1414 if (locked == FALSE) {
1415 UVMHIST_LOG(maphist,
1416 " wasn't able to relock after fault: retry",
1417 0,0,0,0);
1418 if (uobjpage->flags & PG_WANTED)
1419 wakeup(uobjpage);
1420 if (uobjpage->flags & PG_RELEASED) {
1421 uvmexp.fltpgrele++;
1422 uvm_pagefree(uobjpage);
1423 goto ReFault;
1424 }
1425 uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1426 UVM_PAGE_OWN(uobjpage, NULL);
1427 simple_unlock(&uobj->vmobjlock);
1428 goto ReFault;
1429 }
1430
1431 /*
1432 * we have the data in uobjpage which is busy and
1433 * not released. we are holding object lock (so the page
1434 * can't be released on us).
1435 */
1436
1437 /* locked: maps(read), amap(if !null), uobj, uobjpage */
1438 }
1439
1440 /*
1441 * locked:
1442 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1443 */
1444
1445 /*
1446 * notes:
1447 * - at this point uobjpage can not be NULL
1448 * - at this point uobjpage can not be PG_RELEASED (since we checked
1449 * for it above)
1450 * - at this point uobjpage could be PG_WANTED (handle later)
1451 */
1452
1453 KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
1454 (uobjpage->flags & PG_CLEAN) != 0);
1455 if (promote == FALSE) {
1456
1457 /*
1458 * we are not promoting. if the mapping is COW ensure that we
1459 * don't give more access than we should (e.g. when doing a read
1460 * fault on a COPYONWRITE mapping we want to map the COW page in
1461 * R/O even though the entry protection could be R/W).
1462 *
1463 * set "pg" to the page we want to map in (uobjpage, usually)
1464 */
1465
1466 /* no anon in this case. */
1467 anon = NULL;
1468
1469 uvmexp.flt_obj++;
1470 if (UVM_ET_ISCOPYONWRITE(ufi.entry) ||
1471 UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
1472 enter_prot &= ~VM_PROT_WRITE;
1473 pg = uobjpage; /* map in the actual object */
1474
1475 /* assert(uobjpage != PGO_DONTCARE) */
1476
1477 /*
1478 * we are faulting directly on the page. be careful
1479 * about writing to loaned pages...
1480 */
1481
1482 if (uobjpage->loan_count) {
1483 if (!cow_now) {
1484 /* read fault: cap the protection at readonly */
1485 /* cap! */
1486 enter_prot = enter_prot & ~VM_PROT_WRITE;
1487 } else {
1488 /* write fault: must break the loan here */
1489
1490 pg = uvm_loanbreak(uobjpage);
1491 if (pg == NULL) {
1492
1493 /*
1494 * drop ownership of page, it can't
1495 * be released
1496 */
1497
1498 if (uobjpage->flags & PG_WANTED)
1499 wakeup(uobjpage);
1500 uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1501 UVM_PAGE_OWN(uobjpage, NULL);
1502
1503 uvmfault_unlockall(&ufi, amap, uobj,
1504 NULL);
1505 UVMHIST_LOG(maphist,
1506 " out of RAM breaking loan, waiting",
1507 0,0,0,0);
1508 uvmexp.fltnoram++;
1509 uvm_wait("flt_noram4");
1510 goto ReFault;
1511 }
1512 uobjpage = pg;
1513 }
1514 }
1515 } else {
1516
1517 /*
1518 * if we are going to promote the data to an anon we
1519 * allocate a blank anon here and plug it into our amap.
1520 */
1521 #ifdef DIAGNOSTIC
1522 if (amap == NULL)
1523 panic("uvm_fault: want to promote data, but no anon");
1524 #endif
1525
1526 anon = uvm_analloc();
1527 if (anon) {
1528
1529 /*
1530 * The new anon is locked.
1531 *
1532 * In `Fill in data...' below, if
1533 * uobjpage == PGO_DONTCARE, we want
1534 * a zero'd, dirty page, so have
1535 * uvm_pagealloc() do that for us.
1536 */
1537
1538 pg = uvm_pagealloc(NULL, 0, anon,
1539 (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
1540 }
1541
1542 /*
1543 * out of memory resources?
1544 */
1545
1546 if (anon == NULL || pg == NULL) {
1547 if (anon != NULL) {
1548 anon->an_ref--;
1549 simple_unlock(&anon->an_lock);
1550 uvm_anfree(anon);
1551 }
1552
1553 /*
1554 * arg! must unbusy our page and fail or sleep.
1555 */
1556
1557 if (uobjpage != PGO_DONTCARE) {
1558 if (uobjpage->flags & PG_WANTED)
1559 /* still holding object lock */
1560 wakeup(uobjpage);
1561
1562 uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1563 UVM_PAGE_OWN(uobjpage, NULL);
1564 }
1565
1566 /* unlock and fail ... */
1567 uvmfault_unlockall(&ufi, amap, uobj, NULL);
1568 if (anon == NULL || uvm_swapisfull()) {
1569 UVMHIST_LOG(maphist, " promote: out of VM",
1570 0,0,0,0);
1571 uvmexp.fltnoanon++;
1572 return ENOMEM;
1573 }
1574
1575 UVMHIST_LOG(maphist, " out of RAM, waiting for more",
1576 0,0,0,0);
1577 uvmexp.fltnoram++;
1578 uvm_wait("flt_noram5");
1579 goto ReFault;
1580 }
1581
1582 /*
1583 * fill in the data
1584 */
1585
1586 if (uobjpage != PGO_DONTCARE) {
1587 uvmexp.flt_prcopy++;
1588 /* copy page [pg now dirty] */
1589 uvm_pagecopy(uobjpage, pg);
1590
1591 /*
1592 * promote to shared amap? make sure all sharing
1593 * procs see it
1594 */
1595
1596 if ((amap_flags(amap) & AMAP_SHARED) != 0) {
1597 pmap_page_protect(uobjpage, VM_PROT_NONE);
1598 /*
1599 * XXX: PAGE MIGHT BE WIRED!
1600 */
1601 }
1602
1603 /*
1604 * dispose of uobjpage. it can't be PG_RELEASED
1605 * since we still hold the object lock.
1606 * drop handle to uobj as well.
1607 */
1608
1609 if (uobjpage->flags & PG_WANTED)
1610 /* still have the obj lock */
1611 wakeup(uobjpage);
1612 uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1613 UVM_PAGE_OWN(uobjpage, NULL);
1614 simple_unlock(&uobj->vmobjlock);
1615 uobj = NULL;
1616
1617 UVMHIST_LOG(maphist,
1618 " promote uobjpage 0x%x to anon/page 0x%x/0x%x",
1619 uobjpage, anon, pg, 0);
1620
1621 } else {
1622 uvmexp.flt_przero++;
1623
1624 /*
1625 * Page is zero'd and marked dirty by uvm_pagealloc()
1626 * above.
1627 */
1628
1629 UVMHIST_LOG(maphist," zero fill anon/page 0x%x/0x%x",
1630 anon, pg, 0, 0);
1631 }
1632 amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
1633 anon, FALSE);
1634 }
1635
1636 /*
1637 * locked:
1638 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
1639 * anon(if !null), pg(if anon)
1640 *
1641 * note: pg is either the uobjpage or the new page in the new anon
1642 */
1643
1644 /*
1645 * all resources are present. we can now map it in and free our
1646 * resources.
1647 */
1648
1649 UVMHIST_LOG(maphist,
1650 " MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
1651 ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
1652 KASSERT((access_type & VM_PROT_WRITE) == 0 ||
1653 (pg->flags & PG_RDONLY) == 0);
1654 if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1655 pg->flags & PG_RDONLY ? enter_prot & ~VM_PROT_WRITE : enter_prot,
1656 access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) {
1657
1658 /*
1659 * No need to undo what we did; we can simply think of
1660 * this as the pmap throwing away the mapping information.
1661 *
1662 * We do, however, have to go through the ReFault path,
1663 * as the map may change while we're asleep.
1664 */
1665
1666 if (pg->flags & PG_WANTED)
1667 wakeup(pg);
1668
1669 /*
1670 * note that pg can't be PG_RELEASED since we did not drop
1671 * the object lock since the last time we checked.
1672 */
1673
1674 pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
1675 UVM_PAGE_OWN(pg, NULL);
1676 uvmfault_unlockall(&ufi, amap, uobj, anon);
1677 if (uvm_swapisfull()) {
1678 UVMHIST_LOG(maphist,
1679 "<- failed. out of VM",0,0,0,0);
1680 /* XXX instrumentation */
1681 return ENOMEM;
1682 }
1683 /* XXX instrumentation */
1684 uvm_wait("flt_pmfail2");
1685 goto ReFault;
1686 }
1687
1688 uvm_lock_pageq();
1689 if (wire_fault) {
1690 uvm_pagewire(pg);
1691 if (pg->pqflags & PQ_AOBJ) {
1692
1693 /*
1694 * since the now-wired page cannot be paged out,
1695 * release its swap resources for others to use.
1696 * since an aobj page with no swap cannot be PG_CLEAN,
1697 * clear its clean flag now.
1698 */
1699
1700 pg->flags &= ~(PG_CLEAN);
1701 uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
1702 }
1703 } else {
1704 uvm_pageactivate(pg);
1705 }
1706 uvm_unlock_pageq();
1707 if (pg->flags & PG_WANTED)
1708 wakeup(pg);
1709
1710 /*
1711 * note that pg can't be PG_RELEASED since we did not drop the object
1712 * lock since the last time we checked.
1713 */
1714
1715 pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
1716 UVM_PAGE_OWN(pg, NULL);
1717 uvmfault_unlockall(&ufi, amap, uobj, anon);
1718 pmap_update(ufi.orig_map->pmap);
1719 UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
1720 return 0;
1721 }
1722
1723 /*
1724 * uvm_fault_wire: wire down a range of virtual addresses in a map.
1725 *
1726 * => map may be read-locked by caller, but MUST NOT be write-locked.
1727 * => if map is read-locked, any operations which may cause map to
1728 * be write-locked in uvm_fault() must be taken care of by
1729 * the caller. See uvm_map_pageable().
1730 */
1731
1732 int
1733 uvm_fault_wire(map, start, end, fault_type, access_type)
1734 struct vm_map *map;
1735 vaddr_t start, end;
1736 vm_fault_t fault_type;
1737 vm_prot_t access_type;
1738 {
1739 vaddr_t va;
1740 int error;
1741
1742 /*
1743 * now fault it in a page at a time. if the fault fails then we have
1744 * to undo what we have done. note that in uvm_fault VM_PROT_NONE
1745 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
1746 */
1747
1748 /*
1749 * XXX work around overflowing a vaddr_t. this prevents us from
1750 * wiring the last page in the address space, though.
1751 */
1752 if (start > end) {
1753 return EFAULT;
1754 }
1755
1756 for (va = start ; va < end ; va += PAGE_SIZE) {
1757 error = uvm_fault(map, va, fault_type, access_type);
1758 if (error) {
1759 if (va != start) {
1760 uvm_fault_unwire(map, start, va);
1761 }
1762 return error;
1763 }
1764 }
1765 return 0;
1766 }
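
/*
 * usage sketch (editorial; the exact arguments are an assumption):
 * uvm_map_pageable() wires an entry's range roughly as
 *
 *	error = uvm_fault_wire(map, entry->start, entry->end,
 *	    VM_FAULT_WIREMAX, VM_PROT_NONE);
 *
 * on failure, the pages faulted in so far have already been unwired
 * by the loop above before the error is returned.
 */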
1767
1768 /*
1769 * uvm_fault_unwire(): unwire range of virtual space.
1770 */
1771
1772 void
1773 uvm_fault_unwire(map, start, end)
1774 struct vm_map *map;
1775 vaddr_t start, end;
1776 {
1777 vm_map_lock_read(map);
1778 uvm_fault_unwire_locked(map, start, end);
1779 vm_map_unlock_read(map);
1780 }
1781
1782 /*
1783 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
1784 *
1785 * => map must be at least read-locked.
1786 */
1787
1788 void
1789 uvm_fault_unwire_locked(map, start, end)
1790 struct vm_map *map;
1791 vaddr_t start, end;
1792 {
1793 struct vm_map_entry *entry;
1794 pmap_t pmap = vm_map_pmap(map);
1795 vaddr_t va;
1796 paddr_t pa;
1797 struct vm_page *pg;
1798
1799 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1800
1801 /*
1802 * we assume that the area we are unwiring has actually been wired
1803 * in the first place. this means that we should be able to extract
1804 * the PAs from the pmap. we also lock out the page daemon so that
1805 * we can call uvm_pageunwire.
1806 */
1807
1808 uvm_lock_pageq();
1809
1810 /*
1811 * find the beginning map entry for the region.
1812 */
1813
1814 KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
1815 if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
1816 panic("uvm_fault_unwire_locked: address not in map");
1817
1818 for (va = start; va < end; va += PAGE_SIZE) {
1819 if (pmap_extract(pmap, va, &pa) == FALSE)
1820 continue;
1821
1822 /*
1823 * find the map entry for the current address.
1824 */
1825
1826 KASSERT(va >= entry->start);
1827 while (va >= entry->end) {
1828 KASSERT(entry->next != &map->header &&
1829 entry->next->start <= entry->end);
1830 entry = entry->next;
1831 }
1832
1833 /*
1834 * if the entry is no longer wired, tell the pmap.
1835 */
1836
1837 if (VM_MAPENT_ISWIRED(entry) == 0)
1838 pmap_unwire(pmap, va);
1839
1840 pg = PHYS_TO_VM_PAGE(pa);
1841 if (pg)
1842 uvm_pageunwire(pg);
1843 }
1844
1845 uvm_unlock_pageq();
1846 }