/*	$NetBSD: uvm_aobj.c,v 1.62 2004/03/24 07:55:01 junyoung Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.62 2004/03/24 07:55:01 junyoung Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))


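/*
 * a worked example of the macros above (illustrative only): with
 * UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123 falls in the cluster
 * tagged 0x12 (0x123 >> 4) and uses slot 3 (0x123 & 0xf) of that
 * cluster's elt.  UAO_SWHASH_THRESHOLD works out to 16 * 4 = 64, so
 * only aobjs larger than 64 pages use the hash table; smaller ones use
 * the flat u_swslots array.
 *
 *	swhash = UAO_SWHASH_HASH(aobj, 0x123);	    bucket for tag 0x12
 *	slot = UAO_SWHASH_ELT_PAGESLOT(elt, 0x123);    == elt->slots[3]
 */
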
/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

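/*
 * since u_obj is the first member of struct uvm_aobj, a pointer to the
 * embedded uvm_object converts back to the enclosing aobj with a cast,
 * as the pager functions below do.  a minimal sketch:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 *	KASSERT(&aobj->u_obj == uobj);
 */
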
/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, boolean_t);

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);
static boolean_t uao_pagein(struct uvm_aobj *, int, int);
static boolean_t uao_pagein_page(struct uvm_aobj *, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_get,		/* get */
	uao_put,		/* flush */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}

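/*
 * an illustrative caller sketch for the slot protocol above (the
 * error-handling details are hypothetical, not from this file):
 * storing a non-zero slot can fail with -1 if no memory is available
 * for the swhash elt, and a non-zero return hands the old slot back
 * to the caller to release, as uao_dropswap() does below.
 *
 *	oldslot = uao_set_swslot(uobj, pageidx, newslot);
 *	if (oldslot == -1)
 *		... out of memory, back out and retry later ...
 *	else if (oldslot > 0)
 *		uvm_swap_free(oldslot, 1);
 */
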
/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{
	int swpgonlydelta = 0;

	simple_unlock(&aobj->u_obj.vmobjlock);
	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
					}
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */

	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		aobj->u_obj.uo_refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting, we should be the only thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */

	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);
	return(&aobj->u_obj);
}

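/*
 * a hypothetical uao_create() usage sketch (not from this file): a
 * normal caller passes zero flags, gets back an object holding one
 * reference, and drops that reference with uao_detach() when done.
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);
 *	... fault pages in through the pager's get function, etc. ...
 *	uao_detach(uobj);
 */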


/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;
	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */

	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL);
	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", &pool_allocator_nointr);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist, " (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	uvm_lock_pageq();
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

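/*
 * a worked example of the by_list heuristic (numbers illustrative,
 * assuming UVM_PAGE_HASH_PENALTY is 4): flushing a 10 page range of an
 * object with 100 resident pages tests 100 <= 10 * 4, which fails, so
 * we do per-offset hash lookups; a 30 page range tests 100 <= 30 * 4,
 * which holds, so we walk memq instead.
 */
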
int
uao_put(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	boolean_t by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of loop or we
	 * will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		PHOLD(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	uvm_lock_pageq();

	/* locked: both page queues and uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}
		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pg);
			uvm_pagedeactivate(pg);
			continue;

		case PGO_FREE:

			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/*
			 * wait and try again if the page is busy.
			 * otherwise free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);
			if (pg->flags & PG_BUSY) {
				if (by_list) {
					TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				}
				pg->flags |= PG_WANTED;
				uvm_unlock_pageq();
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "uao_put", 0);
				simple_lock(&uobj->vmobjlock);
				uvm_lock_pageq();
				if (by_list) {
					nextpg = TAILQ_NEXT(&curmp, listq);
					TAILQ_REMOVE(&uobj->memq, &curmp,
					    listq);
				} else
					curoff -= PAGE_SIZE;
				continue;
			}
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		}
	}
	uvm_unlock_pageq();
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curlwp);
	}
	simple_unlock(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

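/*
 * an illustrative two-phase caller, in the style of the fault code
 * (hypothetical, not from this file; real callers go through the
 * pgo_get pager op): try PGO_LOCKED first while the fault data
 * structures are held, and on EBUSY retry without it so the function
 * may sleep or start I/O for the swapped-out case.
 *
 *	npages = 1;
 *	error = uao_get(uobj, off, &pg, &npages, 0, VM_PROT_READ,
 *	    advice, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		... unlock the fault data structures ...
 *		npages = 1;
 *		error = uao_get(uobj, off, &pg, &npages, 0,
 *		    VM_PROT_READ, advice, 0);
 *	}
 */
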
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, error, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			simple_unlock(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return error;
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;
	boolean_t rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}
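
/*
 * a hypothetical sketch of the uao_swap_off() caller side (the real
 * caller lives in the swap code, and these names are illustrative):
 * drain a device's slot range before removing it, failing the request
 * if a pagein runs out of memory.
 *
 *	if (uao_swap_off(dev_startslot, dev_endslot)) {
 *		... out of memory, fail the swap removal ...
 *	}
 */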


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	simple_lock(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
		return TRUE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * deactivate the page (to make sure it's on a page queue).
	 */

	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return FALSE;
}