FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_bio.c
1 /* $NetBSD: uvm_bio.c,v 1.54 2006/11/01 10:18:27 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1998 Chuck Silvers.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32 /*
33 * uvm_bio.c: buffered i/o object mapping cache
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.54 2006/11/01 10:18:27 yamt Exp $");
38
39 #include "opt_uvmhist.h"
40 #include "opt_ubc.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46
47 #include <uvm/uvm.h>
48
49 /*
50 * global data structures
51 */
52
53 /*
54 * local functions
55 */
56
57 static int ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
58 int, int, vm_prot_t, int);
59 static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
60
61 /*
62  * local data structures
63 */
64
65 #define UBC_HASH(uobj, offset) \
66 (((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
67 ubc_object.hashmask)
68
69 #define UBC_QUEUE(offset) \
70 (&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
71 (UBC_NQUEUES - 1)])
72
73 #define UBC_UMAP_ADDR(u) \
74 (vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
75
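/*
 * A note on the macros above: UBC_HASH() maps an (object, offset) pair
 * to a bucket in ubc_object.hash so an existing window for that range
 * can be found quickly; UBC_QUEUE() selects one of the inactive queues
 * by window index so that, where PMAP_PREFER is available, recycled
 * windows keep a kernel VA whose alignment matches the file offset and
 * cache aliases are avoided; UBC_UMAP_ADDR() converts a ubc_map pointer
 * back into the kernel VA of its window within the contiguous range at
 * ubc_object.kva.
 */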
76
77 #define UMAP_PAGES_LOCKED 0x0001
78 #define UMAP_MAPPING_CACHED 0x0002
79
80 struct ubc_map
81 {
82 struct uvm_object * uobj; /* mapped object */
83 voff_t offset; /* offset into uobj */
84 voff_t writeoff; /* write offset */
85 vsize_t writelen; /* write len */
86 int refcount; /* refcount on mapping */
87 int flags; /* extra state */
88 int advice;
89
90 LIST_ENTRY(ubc_map) hash; /* hash table */
91 TAILQ_ENTRY(ubc_map) inactive; /* inactive queue */
92 };
93
94 static struct ubc_object
95 {
96 struct uvm_object uobj; /* glue for uvm_map() */
97 char *kva; /* where ubc_object is mapped */
98 struct ubc_map *umap; /* array of ubc_map's */
99
100 LIST_HEAD(, ubc_map) *hash; /* hashtable for cached ubc_map's */
101 u_long hashmask; /* mask for hashtable */
102
103 TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
104 /* inactive queues for ubc_map's */
105
106 } ubc_object;
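/*
 * The window cache itself: ubc_object.kva points at a contiguous range
 * of kernel virtual space holding ubc_nwins windows of ubc_winsize
 * bytes each, and ubc_object.umap is a parallel array with one ubc_map
 * per window recording which (uobj, offset) range the window currently
 * maps.  Windows with refcount zero stay on the hash (so their mappings
 * can be reused on a cache hit) but also sit on an inactive queue from
 * which ubc_alloc() recycles them.
 */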
107
108 struct uvm_pagerops ubc_pager =
109 {
110 .pgo_fault = ubc_fault,
111 /* ... rest are NULL */
112 };
113
114 int ubc_nwins = UBC_NWINS;
115 int ubc_winshift = UBC_WINSHIFT;
116 int ubc_winsize;
117 #if defined(PMAP_PREFER)
118 int ubc_nqueues;
119 #define UBC_NQUEUES ubc_nqueues
120 #else
121 #define UBC_NQUEUES 1
122 #endif
123
124 #if defined(UBC_STATS)
125
126 #define UBC_EVCNT_DEFINE(name) \
127 struct evcnt ubc_evcnt_##name = \
128 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
129 EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
130 #define UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++
131
132 #else /* defined(UBC_STATS) */
133
134 #define UBC_EVCNT_DEFINE(name) /* nothing */
135 #define UBC_EVCNT_INCR(name) /* nothing */
136
137 #endif /* defined(UBC_STATS) */
138
139 UBC_EVCNT_DEFINE(wincachehit)
140 UBC_EVCNT_DEFINE(wincachemiss)
141
142 /*
143 * ubc_init
144 *
145 * init pager private data structures.
146 */
147
148 void
149 ubc_init(void)
150 {
151 struct ubc_map *umap;
152 vaddr_t va;
153 int i;
154
155 /*
156 * Make sure ubc_winshift is sane.
157 */
158 if (ubc_winshift < PAGE_SHIFT)
159 ubc_winshift = PAGE_SHIFT;
160
161 /*
162 * init ubc_object.
163 * alloc and init ubc_map's.
164 * init inactive queues.
165 * alloc and init hashtable.
166 * map in ubc_object.
167 */
168
169 UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);
170
171 ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
172 M_TEMP, M_NOWAIT);
173 if (ubc_object.umap == NULL)
174 panic("ubc_init: failed to allocate ubc_map");
175 memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));
176
177 if (ubc_winshift < PAGE_SHIFT) {
178 ubc_winshift = PAGE_SHIFT;
179 }
180 va = (vaddr_t)1L;
181 #ifdef PMAP_PREFER
182 PMAP_PREFER(0, &va, 0, 0); /* kernel is never topdown */
183 ubc_nqueues = va >> ubc_winshift;
184 if (ubc_nqueues == 0) {
185 ubc_nqueues = 1;
186 }
187 #endif
188 ubc_winsize = 1 << ubc_winshift;
189 ubc_object.inactive = malloc(UBC_NQUEUES *
190 sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
191 if (ubc_object.inactive == NULL)
192 panic("ubc_init: failed to allocate inactive queue heads");
193 for (i = 0; i < UBC_NQUEUES; i++) {
194 TAILQ_INIT(&ubc_object.inactive[i]);
195 }
196 for (i = 0; i < ubc_nwins; i++) {
197 umap = &ubc_object.umap[i];
198 TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
199 umap, inactive);
200 }
201
202 ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
203 &ubc_object.hashmask);
204 for (i = 0; i <= ubc_object.hashmask; i++) {
205 LIST_INIT(&ubc_object.hash[i]);
206 }
207
208 if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
209 ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
210 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
211 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
212 panic("ubc_init: failed to map ubc_object");
213 }
214 UVMHIST_INIT(ubchist, 300);
215 }
216
217 /*
218 * ubc_fault: fault routine for ubc mapping
219 */
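/*
 * This runs when a kernel access to a ubc window touches an address
 * with no mapping yet: find the ubc_map that owns the faulting window,
 * ask the mapped object's pgo_get routine for the pages backing that
 * part of the window, and enter them into the kernel pmap so that
 * further accesses to those pages proceed without faulting.
 */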
220
221 static int
222 ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
223 int ign3, int ign4, vm_prot_t access_type, int flags)
224 {
225 struct uvm_object *uobj;
226 struct ubc_map *umap;
227 vaddr_t va, eva, ubc_offset, slot_offset;
228 int i, error, npages;
229 struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
230 vm_prot_t prot;
231 UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
232
233 /*
234 * no need to try with PGO_LOCKED...
235 * we don't need to have the map locked since we know that
236 * no one will mess with it until our reference is released.
237 */
238
239 if (flags & PGO_LOCKED) {
240 uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
241 flags &= ~PGO_LOCKED;
242 }
243
244 va = ufi->orig_rvaddr;
245 ubc_offset = va - (vaddr_t)ubc_object.kva;
246 umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
247 KASSERT(umap->refcount != 0);
248 KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
249 slot_offset = ubc_offset & (ubc_winsize - 1);
250
251 /*
252 * some platforms cannot write to individual bytes atomically, so
253 * software has to do read/modify/write of larger quantities instead.
254 * this means that the access_type for "write" operations
255 * can be VM_PROT_READ, which confuses us mightily.
256 *
257 * deal with this by resetting access_type based on the info
258 * that ubc_alloc() stores for us.
259 */
260
261 access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
262 UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
263 va, ubc_offset, access_type, 0);
264
265 #ifdef DIAGNOSTIC
266 if ((access_type & VM_PROT_WRITE) != 0) {
267 if (slot_offset < trunc_page(umap->writeoff) ||
268 umap->writeoff + umap->writelen <= slot_offset) {
269 panic("ubc_fault: out of range write");
270 }
271 }
272 #endif
273
274 /* no umap locking needed since we have a ref on the umap */
275 uobj = umap->uobj;
276
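	/*
	 * Decide how many pages to bring in.  For a read fault, map
	 * everything from the faulting slot to the end of the window.
	 * For a write fault, stop at the end of the pending write set
	 * up by ubc_alloc(), and allow pages past EOF (PGO_PASTEOF)
	 * since the write may be extending the file.
	 */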
277 if ((access_type & VM_PROT_WRITE) == 0) {
278 npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
279 } else {
280 npages = (round_page(umap->offset + umap->writeoff +
281 umap->writelen) - (umap->offset + slot_offset))
282 >> PAGE_SHIFT;
283 flags |= PGO_PASTEOF;
284 }
285
286 again:
287 memset(pgs, 0, sizeof (pgs));
288 simple_lock(&uobj->vmobjlock);
289
290 UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
291 slot_offset, umap->writeoff, umap->writelen, 0);
292 UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
293 uobj, umap->offset + slot_offset, npages, 0);
294
295 error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
296 &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
297 PGO_NOTIMESTAMP);
298 UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
299 0);
300
301 if (error == EAGAIN) {
302 tsleep(&lbolt, PVM, "ubc_fault", 0);
303 goto again;
304 }
305 if (error) {
306 return error;
307 }
308
309 va = ufi->orig_rvaddr;
310 eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
311
312 UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
313 for (i = 0; va < eva; i++, va += PAGE_SIZE) {
314 boolean_t rdonly;
315 vm_prot_t mask;
316
317 /*
318 * for virtually-indexed, virtually-tagged caches we should
319 * avoid creating writable mappings when we don't absolutely
320 * need them, since the "compatible alias" trick doesn't work
321 * on such caches. otherwise, we can always map the pages
322 * writable.
323 */
324
325 #ifdef PMAP_CACHE_VIVT
326 prot = VM_PROT_READ | access_type;
327 #else
328 prot = VM_PROT_READ | VM_PROT_WRITE;
329 #endif
330 UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
331 pg = pgs[i];
332
333 if (pg == NULL || pg == PGO_DONTCARE) {
334 continue;
335 }
336
337 uobj = pg->uobject;
338 simple_lock(&uobj->vmobjlock);
339 if (pg->flags & PG_WANTED) {
340 wakeup(pg);
341 }
342 KASSERT((pg->flags & PG_FAKE) == 0);
343 if (pg->flags & PG_RELEASED) {
344 uvm_lock_pageq();
345 uvm_pagefree(pg);
346 uvm_unlock_pageq();
347 simple_unlock(&uobj->vmobjlock);
348 continue;
349 }
350 if (pg->loan_count != 0) {
351
352 /*
353 * avoid unneeded loan break if possible.
354 */
355
356 if ((access_type & VM_PROT_WRITE) == 0)
357 prot &= ~VM_PROT_WRITE;
358
359 if (prot & VM_PROT_WRITE) {
360 struct vm_page *newpg;
361
362 newpg = uvm_loanbreak(pg);
363 if (newpg == NULL) {
364 uvm_page_unbusy(&pg, 1);
365 simple_unlock(&uobj->vmobjlock);
366 uvm_wait("ubc_loanbrk");
367 continue; /* will re-fault */
368 }
369 pg = newpg;
370 }
371 }
372
373 /*
374 * note that a page whose backing store is partially allocated
375 * is marked as PG_RDONLY.
376 */
377
378 rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
379 (pg->flags & PG_RDONLY) != 0) ||
380 UVM_OBJ_NEEDS_WRITEFAULT(uobj);
381 KASSERT((pg->flags & PG_RDONLY) == 0 ||
382 (access_type & VM_PROT_WRITE) == 0 ||
383 pg->offset < umap->writeoff ||
384 pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
385 mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
386 error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
387 prot & mask, PMAP_CANFAIL | (access_type & mask));
388 uvm_lock_pageq();
389 uvm_pageactivate(pg);
390 uvm_unlock_pageq();
391 pg->flags &= ~(PG_BUSY|PG_WANTED);
392 UVM_PAGE_OWN(pg, NULL);
393 simple_unlock(&uobj->vmobjlock);
394 if (error) {
395 UVMHIST_LOG(ubchist, "pmap_enter fail %d",
396 error, 0, 0, 0);
397 uvm_wait("ubc_pmfail");
398 /* will refault */
399 }
400 }
401 pmap_update(ufi->orig_map->pmap);
402 return 0;
403 }
404
405 /*
406 * local functions
407 */
408
409 static struct ubc_map *
410 ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
411 {
412 struct ubc_map *umap;
413
414 LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
415 if (umap->uobj == uobj && umap->offset == offset) {
416 return umap;
417 }
418 }
419 return NULL;
420 }
421
422
423 /*
424 * ubc interface functions
425 */
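/*
 * A rough sketch of how callers typically use this interface (loosely
 * modelled on the vnode read/write paths; "uobj", "uio" and the advice
 * value belong to the caller, and error handling is omitted):
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *		void *win;
 *
 *		win = ubc_alloc(uobj, uio->uio_offset, &bytelen,
 *		    UVM_ADV_NORMAL, UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten bytelen to what fits in one window, so the
 * loop simply comes back for another window until the transfer is done.
 */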
426
427 /*
428 * ubc_alloc: allocate a file mapping window
429 */
430
431 void *
432 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
433 int flags)
434 {
435 vaddr_t slot_offset, va;
436 struct ubc_map *umap;
437 voff_t umap_offset;
438 int error;
439 UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);
440
441 UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
442 uobj, offset, *lenp, 0);
443
444 KASSERT(*lenp > 0);
445 umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
446 slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
447 *lenp = MIN(*lenp, ubc_winsize - slot_offset);
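	/*
	 * For example, with ubc_winsize == 0x2000 an offset of 0x12345
	 * selects the window starting at umap_offset 0x12000, slot_offset
	 * is 0x345, and a request longer than 0x1cbb bytes is trimmed to
	 * 0x1cbb so it does not run off the end of the window.
	 */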
448
449 /*
450 * the object is always locked here, so we don't need to add a ref.
451 */
452
453 again:
454 simple_lock(&ubc_object.uobj.vmobjlock);
455 umap = ubc_find_mapping(uobj, umap_offset);
456 if (umap == NULL) {
457 UBC_EVCNT_INCR(wincachemiss);
458 umap = TAILQ_FIRST(UBC_QUEUE(offset));
459 if (umap == NULL) {
460 simple_unlock(&ubc_object.uobj.vmobjlock);
461 tsleep(&lbolt, PVM, "ubc_alloc", 0);
462 goto again;
463 }
464
465 /*
466 * remove from old hash (if any), add to new hash.
467 */
468
469 if (umap->uobj != NULL) {
470 LIST_REMOVE(umap, hash);
471 }
472 umap->uobj = uobj;
473 umap->offset = umap_offset;
474 LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
475 umap, hash);
476 va = UBC_UMAP_ADDR(umap);
477 if (umap->flags & UMAP_MAPPING_CACHED) {
478 umap->flags &= ~UMAP_MAPPING_CACHED;
479 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
480 pmap_update(pmap_kernel());
481 }
482 } else {
483 UBC_EVCNT_INCR(wincachehit);
484 va = UBC_UMAP_ADDR(umap);
485 }
486
487 if (umap->refcount == 0) {
488 TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
489 }
490
491 #ifdef DIAGNOSTIC
492 if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
493 panic("ubc_alloc: concurrent writes uobj %p", uobj);
494 }
495 #endif
496 if (flags & UBC_WRITE) {
497 umap->writeoff = slot_offset;
498 umap->writelen = *lenp;
499 }
500
501 umap->refcount++;
502 umap->advice = advice;
503 simple_unlock(&ubc_object.uobj.vmobjlock);
504 UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
505 umap, umap->refcount, va, flags);
506
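	/*
	 * UBC_FAULTBUSY: the caller wants the pages resident and mapped
	 * before it starts copying, rather than relying on ubc_fault().
	 * Get the pages up front (PGO_OVERWRITE spares us reading pages
	 * that will be completely overwritten), enter them into the
	 * kernel pmap and leave them busy.  ubc_release() undoes this.
	 */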
507 if (flags & UBC_FAULTBUSY) {
508 int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
509 struct vm_page *pgs[npages];
510 int gpflags =
511 PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
512 PGO_NOTIMESTAMP;
513 int i;
514 KDASSERT(flags & UBC_WRITE);
515
516 if (umap->flags & UMAP_MAPPING_CACHED) {
517 umap->flags &= ~UMAP_MAPPING_CACHED;
518 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
519 }
520 memset(pgs, 0, sizeof(pgs));
521 simple_lock(&uobj->vmobjlock);
522 error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
523 &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
524 UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
525 if (error) {
526 goto out;
527 }
528 for (i = 0; i < npages; i++) {
529 pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
530 VM_PAGE_TO_PHYS(pgs[i]),
531 VM_PROT_READ | VM_PROT_WRITE);
532 }
533 pmap_update(pmap_kernel());
534 umap->flags |= UMAP_PAGES_LOCKED;
535 }
536
537 out:
538 return (void *)(va + slot_offset);
539 }
540
541 /*
542 * ubc_release: free a file mapping window.
543 */
544
545 void
546 ubc_release(void *va, int flags)
547 {
548 struct ubc_map *umap;
549 struct uvm_object *uobj;
550 vaddr_t umapva;
551 boolean_t unmapped;
552 UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);
553
554 UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
555 umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
556 umapva = UBC_UMAP_ADDR(umap);
557 uobj = umap->uobj;
558 KASSERT(uobj != NULL);
559
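	/*
	 * If ubc_alloc() took the UBC_FAULTBUSY path, the pages are
	 * still busy and mapped via pmap_kenter_pa(): zero the tail of
	 * the last page beyond the write, mark the pages dirty and
	 * active, remove the kernel mappings and unbusy the pages.
	 */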
560 if (umap->flags & UMAP_PAGES_LOCKED) {
561 int slot_offset = umap->writeoff;
562 int endoff = umap->writeoff + umap->writelen;
563 int zerolen = round_page(endoff) - endoff;
564 int npages = (int)(round_page(umap->writeoff + umap->writelen)
565 - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
566 struct vm_page *pgs[npages];
567 paddr_t pa;
568 int i;
569 boolean_t rv;
570
571 if (zerolen) {
572 memset((char *)umapva + endoff, 0, zerolen);
573 }
574 umap->flags &= ~UMAP_PAGES_LOCKED;
575 uvm_lock_pageq();
576 for (i = 0; i < npages; i++) {
577 rv = pmap_extract(pmap_kernel(),
578 umapva + slot_offset + (i << PAGE_SHIFT), &pa);
579 KASSERT(rv);
580 pgs[i] = PHYS_TO_VM_PAGE(pa);
581 pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
582 KASSERT(pgs[i]->loan_count == 0);
583 uvm_pageactivate(pgs[i]);
584 }
585 uvm_unlock_pageq();
586 pmap_kremove(umapva, ubc_winsize);
587 pmap_update(pmap_kernel());
588 simple_lock(&uobj->vmobjlock);
589 uvm_page_unbusy(pgs, npages);
590 simple_unlock(&uobj->vmobjlock);
591 unmapped = TRUE;
592 } else {
593 unmapped = FALSE;
594 }
595
596 simple_lock(&ubc_object.uobj.vmobjlock);
597 umap->writeoff = 0;
598 umap->writelen = 0;
599 umap->refcount--;
600 if (umap->refcount == 0) {
601 if (flags & UBC_UNMAP) {
602
603 /*
604 * Invalidate any cached mappings if requested.
605 * This is typically used to avoid leaving
606 * incompatible cache aliases around indefinitely.
607 */
608
609 pmap_remove(pmap_kernel(), umapva,
610 umapva + ubc_winsize);
611 umap->flags &= ~UMAP_MAPPING_CACHED;
612 pmap_update(pmap_kernel());
613 LIST_REMOVE(umap, hash);
614 umap->uobj = NULL;
615 TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
616 inactive);
617 } else {
618 if (!unmapped) {
619 umap->flags |= UMAP_MAPPING_CACHED;
620 }
621 TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
622 inactive);
623 }
624 }
625 UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
626 simple_unlock(&ubc_object.uobj.vmobjlock);
627 }
628
629
630 #if 0 /* notused */
631 /*
632  * ubc_flush: remove a range of mappings from the ubc mapping cache.
633 */
634
635 void
636 ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
637 {
638 struct ubc_map *umap;
639 vaddr_t va;
640 UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);
641
642 UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
643 uobj, start, end, 0);
644
645 simple_lock(&ubc_object.uobj.vmobjlock);
646 for (umap = ubc_object.umap;
647 umap < &ubc_object.umap[ubc_nwins];
648 umap++) {
649
650 if (umap->uobj != uobj || umap->offset < start ||
651 (umap->offset >= end && end != 0) ||
652 umap->refcount > 0) {
653 continue;
654 }
655
656 /*
657 * remove from hash,
658 * move to head of inactive queue.
659 */
660
661 va = (vaddr_t)(ubc_object.kva +
662 ((umap - ubc_object.umap) << ubc_winshift));
663 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
664
665 LIST_REMOVE(umap, hash);
666 umap->uobj = NULL;
667 TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
668 TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
669 }
670 pmap_update(pmap_kernel());
671 simple_unlock(&ubc_object.uobj.vmobjlock);
672 }
673 #endif /* notused */