FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_bio.c
/*	$NetBSD: uvm_bio.c,v 1.37.2.2 2006/07/28 12:32:22 tron Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
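
/*
 * UBC maintains a pool of fixed-size kernel virtual windows (each
 * ubc_winsize bytes long) that are mapped over ranges of uvm_objects
 * (typically vnodes).  File I/O paths borrow a window with ubc_alloc(),
 * copy data through it, and return it with ubc_release(), so data moves
 * between user buffers and the page cache without double buffering.
 * A window's pages are brought in lazily by ubc_fault() below.
 */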

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.37.2.2 2006/07/28 12:32:22 tron Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
	    int, vm_fault_t, vm_prot_t, int);
struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

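/*
 * Window n of the cache lives at ubc_object.kva + (n << ubc_winshift)
 * and is described by ubc_object.umap[n]; UBC_UMAP_ADDR() computes the
 * former from the latter.  UBC_HASH() mixes the object pointer with the
 * page index of the offset to choose a hash chain, and UBC_QUEUE()
 * picks an inactive queue by window-aligned offset so that reused
 * windows keep cache-friendly virtual addresses on PMAP_PREFER
 * platforms.
 */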
#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
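
/*
 * ubc_nwins and ubc_winshift start out at the compile-time defaults
 * UBC_NWINS and UBC_WINSHIFT; ubc_init() clamps the shift to at least
 * PAGE_SHIFT and derives ubc_winsize from it.
 */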

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
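
/*
 * A fault on a window is resolved by looking up the ubc_map that covers
 * the faulting address, asking the underlying uvm_object's pgo_get
 * method for the pages, and entering them into the kernel pmap at the
 * corresponding window addresses.
 */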

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

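	/*
	 * A read fault fills pages from here to the end of the window.
	 * A write fault fills only as far as the end of the pending
	 * write recorded by ubc_alloc(), and sets PGO_PASTEOF so that
	 * pages past EOF can be allocated when the write extends the
	 * file.
	 */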
	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, 0, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				pg = uvm_loanbreak(pg);
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = (access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0;
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
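
/*
 * A typical consumer (e.g. a filesystem write path) brackets each copy
 * with ubc_alloc()/ubc_release(); a sketch, error handling omitted:
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *		void *win = ubc_alloc(uobj, uio->uio_offset, &bytelen,
 *		    UBC_WRITE);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *	}
 */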

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

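	/*
	 * UBC_FAULTBUSY: fetch the pages covering the request now and
	 * map them with pmap_kenter_pa() so the caller's copy cannot
	 * fault; the pages remain busy until ubc_release() sees
	 * UMAP_PAGES_LOCKED.  If pgo_get fails, the window is returned
	 * anyway and the caller takes the normal ubc_fault() path on
	 * first touch.
	 */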
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */
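
/*
 * With UBC_UNMAP the window's translations are removed immediately and
 * the umap is disassociated from its object; otherwise the translations
 * are left intact (UMAP_MAPPING_CACHED) so that a later ubc_alloc() of
 * the same range can skip the pmap work.
 */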

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

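		/*
		 * The write may have ended partway through the last
		 * page; zero the rest of that page so that no stale
		 * data is left mapped beyond the end of the write.
		 */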
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */