sys/uvm/uvm_glue.c
/*	$NetBSD: uvm_glue.c,v 1.83.4.3 2005/12/06 20:00:12 riz Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.83.4.3 2005/12/06 20:00:12 riz Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);

#define UVM_NUAREA_MAX 16
void *uvm_uareas;
int uvm_nuarea;
struct simplelock uvm_uareas_slock = SIMPLELOCK_INITIALIZER;

static void uvm_uarea_free(vaddr_t);

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(addr, len, rw)
        caddr_t addr;
        size_t len;
        int rw;
{
        boolean_t rv;
        vaddr_t saddr, eaddr;
        vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

        saddr = trunc_page((vaddr_t)addr);
        eaddr = round_page((vaddr_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);

        return (rv);
}
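
/*
 * Example (illustrative sketch, not part of the original source): a
 * /dev/kmem-style driver would use the check above before touching a
 * kernel virtual address range on a user's behalf; "v" and "c" are
 * hypothetical address/count variables from such a driver's uiomove
 * loop:
 *
 *	if (!uvm_kernacc((caddr_t)v, c,
 *	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
 *		return (EFAULT);
 */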

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect, a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(addr, len, rw)
        caddr_t addr;
        size_t len;
        int rw;
{
        vm_prot_t prot;
        paddr_t pa;
        vaddr_t sva, eva;

        prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
        eva = round_page((vaddr_t)addr + len);
        for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
                /*
                 * Extract physical address for the page.
                 */
                if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
                        panic("chgkprot: invalid page");
                pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
        }
        pmap_update(pmap_kernel());
}
#endif
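
/*
 * Example (illustrative sketch): a KGDB-style debugger planting a
 * breakpoint in read-only kernel text might bracket the store with
 * the function above; "pc" and "bpinst" are hypothetical names:
 *
 *	uvm_chgkprot((caddr_t)pc, sizeof(bpinst), B_WRITE);
 *	*(u_int *)pc = bpinst;
 *	uvm_chgkprot((caddr_t)pc, sizeof(bpinst), B_READ);
 */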

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(p, addr, len, access_type)
        struct proc *p;
        caddr_t addr;
        size_t len;
        vm_prot_t access_type;
{
        struct vm_map *map;
        vaddr_t start, end;
        int error;

        map = &p->p_vmspace->vm_map;
        start = trunc_page((vaddr_t)addr);
        end = round_page((vaddr_t)addr + len);
        error = uvm_fault_wire(map, start, end, VM_FAULT_WIRE, access_type);
        return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(p, addr, len)
        struct proc *p;
        caddr_t addr;
        size_t len;
{
        uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
            round_page((vaddr_t)addr + len));
}
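
/*
 * Example (illustrative sketch; "bp", "todo" and "flags" stand in for
 * a typical physio implementation's locals): callers bracket a raw
 * transfer on a user buffer with the pair above so its pages stay
 * resident for the duration.  The access type is how the device will
 * touch memory: VM_PROT_WRITE when data is read in from the device,
 * VM_PROT_READ when data is written out to it.
 *
 *	error = uvm_vslock(p, bp->b_data, todo,
 *	    (flags & B_READ) ? VM_PROT_WRITE : VM_PROT_READ);
 *	if (error)
 *		return (error);
 *	... perform the transfer ...
 *	uvm_vsunlock(p, bp->b_data, todo);
 */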

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(p1, p2, shared)
        struct proc *p1, *p2;
        boolean_t shared;
{

        if (shared == TRUE) {
                p2->p_vmspace = NULL;
                uvmspace_share(p1, p2);
        } else {
                p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
        }

        cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *   [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *   stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *   process, and thus addresses of automatic variables may be invalid
 *   after cpu_lwp_fork returns in the child process.  We do nothing here
 *   after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(l1, l2, stack, stacksize, func, arg)
        struct lwp *l1, *l2;
        void *stack;
        size_t stacksize;
        void (*func)(void *);
        void *arg;
{
        struct user *up = l2->l_addr;
        int error;

        /*
         * Wire down the U-area for the process, which contains the PCB
         * and the kernel stack.  Wired state is stored in l->l_flag's
         * L_INMEM bit rather than in the vm_map_entry's wired count
         * to prevent kernel_map fragmentation.  If we reused a cached
         * U-area, L_INMEM will already be set and we don't need to do
         * anything.
         *
         * Note the kernel stack gets read/write accesses right off the bat.
         */

        if ((l2->l_flag & L_INMEM) == 0) {
                error = uvm_fault_wire(kernel_map, (vaddr_t)up,
                    (vaddr_t)up + USPACE, VM_FAULT_WIRE,
                    VM_PROT_READ | VM_PROT_WRITE);
                if (error)
                        panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error);
#ifdef PMAP_UAREA
                /* Tell the pmap this is a u-area mapping */
                PMAP_UAREA((vaddr_t)up);
#endif
                l2->l_flag |= L_INMEM;
        }

#ifdef KSTACK_CHECK_MAGIC
        /*
         * fill stack with magic number
         */
        kstack_setup_magic(l2);
#endif

        /*
         * cpu_lwp_fork() copies and updates the pcb, and makes the child
         * ready to run.  If this is a normal user fork, the child will
         * exit directly to user mode via child_return() on its first time
         * slice and will not return here.  If this is a kernel thread,
         * the specified entry point will be executed.
         */
        cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}
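
/*
 * Note: the U-area wired above is USPACE bytes of kernel virtual
 * memory holding both the "struct user" (including the PCB) and the
 * LWP's kernel stack; exactly how the two are laid out within those
 * USPACE bytes is machine-dependent.
 */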

/*
 * uvm_uarea_alloc: allocate a u-area
 */

boolean_t
uvm_uarea_alloc(vaddr_t *uaddrp)
{
        vaddr_t uaddr;

#ifndef USPACE_ALIGN
#define USPACE_ALIGN 0
#endif

        simple_lock(&uvm_uareas_slock);
        if (uvm_nuarea > 0) {
                uaddr = (vaddr_t)uvm_uareas;
                uvm_uareas = *(void **)uvm_uareas;
                uvm_nuarea--;
                simple_unlock(&uvm_uareas_slock);
                *uaddrp = uaddr;
                return TRUE;
        } else {
                simple_unlock(&uvm_uareas_slock);
                *uaddrp = uvm_km_valloc1(kernel_map, USPACE, USPACE_ALIGN,
                    UVM_UNKNOWN_OFFSET, 0);
                return FALSE;
        }
}

/*
 * uvm_uarea_free: free a u-area; never blocks
 */

static __inline__ void
uvm_uarea_free(vaddr_t uaddr)
{
        simple_lock(&uvm_uareas_slock);
        *(void **)uaddr = uvm_uareas;
        uvm_uareas = (void *)uaddr;
        uvm_nuarea++;
        simple_unlock(&uvm_uareas_slock);
}
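
/*
 * The u-area cache above is an intrusive singly-linked free list: the
 * first word of each cached u-area is reused to hold the pointer to
 * the next cached u-area, so no separate list nodes need to be
 * allocated.  This is safe because a u-area on the list is, by
 * definition, not in use.  Roughly:
 *
 *	uvm_uareas --> [u-area: next] --> [u-area: next] --> NULL
 */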

/*
 * uvm_uarea_drain: return memory of u-areas over limit
 * back to system
 */

void
uvm_uarea_drain(boolean_t empty)
{
        int leave = empty ? 0 : UVM_NUAREA_MAX;
        vaddr_t uaddr;

        if (uvm_nuarea <= leave)
                return;

        simple_lock(&uvm_uareas_slock);
        while (uvm_nuarea > leave) {
                uaddr = (vaddr_t)uvm_uareas;
                uvm_uareas = *(void **)uvm_uareas;
                uvm_nuarea--;
                simple_unlock(&uvm_uareas_slock);
                uvm_km_free(kernel_map, uaddr, USPACE);
                simple_lock(&uvm_uareas_slock);
        }
        simple_unlock(&uvm_uareas_slock);
}
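
/*
 * Two points about uvm_uarea_drain() above: the initial
 * "uvm_nuarea <= leave" test is made without the lock as a cheap
 * optimization (a stale read at worst delays draining until the next
 * call), and uvm_uareas_slock is dropped around uvm_km_free() because
 * freeing kernel VA may sleep, which is not permitted while holding a
 * simple_lock.  The list head is re-read after relocking.
 */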

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(p)
        struct proc *p;
{
        struct lwp *l = curlwp;		/* XXX */
        struct vmspace *ovm;

        KASSERT(p == l->l_proc);
        ovm = p->p_vmspace;

        /*
         * borrow proc0's address space.
         */
        pmap_deactivate(l);
        p->p_vmspace = proc0.p_vmspace;
        pmap_activate(l);

        uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
        vaddr_t va = (vaddr_t)l->l_addr;

        l->l_flag &= ~L_INMEM;
        uvm_uarea_free(va);
        l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(p)
        struct proc *p;
{

        /*
         * Set up the initial limits on process VM.  Set the maximum
         * resident set size to be all of (reasonably) available memory.
         * This causes any single, large process to start random page
         * replacement once it fills memory.
         */

        p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
        p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}
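
/*
 * Worked example (hypothetical numbers): on a machine with 4 KB pages
 * and uvmexp.free == 25600 free pages, ptoa(25600) yields a soft
 * RLIMIT_RSS of 100 MB, so a single process may grow its resident set
 * to roughly all of free memory before paging against itself.
 */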

#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
#define SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an LWP's u-area.
 */

void
uvm_swapin(l)
        struct lwp *l;
{
        vaddr_t addr;
        int s, error;

        addr = (vaddr_t)l->l_addr;
        /* make L_INMEM true */
        error = uvm_fault_wire(kernel_map, addr, addr + USPACE, VM_FAULT_WIRE,
            VM_PROT_READ | VM_PROT_WRITE);
        if (error) {
                panic("uvm_swapin: rewiring stack failed: %d", error);
        }

        /*
         * Some architectures need to be notified when the user area has
         * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
         */
        cpu_swapin(l);
        SCHED_LOCK(s);
        if (l->l_stat == LSRUN)
                setrunqueue(l);
        l->l_flag |= L_INMEM;
        SCHED_UNLOCK(s);
        l->l_swtime = 0;
        ++uvmexp.swapins;
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *   priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler()
{
        struct lwp *l, *ll;
        int pri;
        int ppri;

loop:
#ifdef DEBUG
        while (!enableswap)
                tsleep(&proc0, PVM, "noswap", 0);
#endif
        ll = NULL;		/* process to choose */
        ppri = INT_MIN;		/* its priority */
        proclist_lock_read();

        LIST_FOREACH(l, &alllwp, l_list) {
                /* is it a runnable swapped out process? */
                if (l->l_stat == LSRUN && (l->l_flag & L_INMEM) == 0) {
                        pri = l->l_swtime + l->l_slptime -
                            (l->l_proc->p_nice - NZERO) * 8;
                        if (pri > ppri) {   /* higher priority?  remember it. */
                                ll = l;
                                ppri = pri;
                        }
                }
        }
        /*
         * XXXSMP: possible unlock/sleep race between here and the
         * "scheduler" tsleep below..
         */
        proclist_unlock_read();

#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: running, procp %p pri %d\n", ll, ppri);
#endif
        /*
         * Nothing to do, back to sleep
         */
        if ((l = ll) == NULL) {
                tsleep(&proc0, PVM, "scheduler", 0);
                goto loop;
        }

        /*
         * we have found a swapped-out process which we would like to
         * bring back in.
         *
         * XXX: this part is really bogus because we could deadlock on
         * memory despite our feeble check
         */
        if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
                if (swapdebug & SDB_SWAPIN)
                        printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
                            l->l_proc->p_pid, l->l_proc->p_comm, l->l_addr,
                            ppri, uvmexp.free);
#endif
                uvm_swapin(l);
                goto loop;
        }
        /*
         * not enough memory, jab the pageout daemon and wait until the
         * coast is clear
         */
#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: no room for pid %d(%s), free %d\n",
                    l->l_proc->p_pid, l->l_proc->p_comm, uvmexp.free);
#endif
        uvm_wait("schedpwait");
#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: room again, free %d\n", uvmexp.free);
#endif
        goto loop;
}
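
/*
 * Worked example of the swap-in priority above (hypothetical values,
 * assuming the traditional NZERO of 20): an LWP swapped out for 30
 * seconds that has also slept 5 seconds at the default nice (p_nice
 * == 20) scores 30 + 5 - (20 - 20) * 8 = 35; the same LWP niced to
 * +10 (p_nice == 30) scores 35 - 80 = -45.  Longer-swapped and
 * less-niced LWPs are therefore brought back in first.
 */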

/*
 * swappable: is LWP "l" swappable?
 */

#define swappable(l)							\
	(((l)->l_flag & (L_INMEM)) &&					\
	 ((((l)->l_proc->p_flag) & (P_SYSTEM | P_WEXIT)) == 0) &&	\
	 (l)->l_holdcnt == 0)

/*
 * swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try and swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads()
{
        struct lwp *l;
        struct lwp *outl, *outl2;
        int outpri, outpri2;
        int didswap = 0;
        extern int maxslp;
        /* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
        if (!enableswap)
                return;
#endif

        /*
         * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
         * outl2/outpri2: the longest resident thread (its swap time)
         */
        outl = outl2 = NULL;
        outpri = outpri2 = 0;
        proclist_lock_read();
        LIST_FOREACH(l, &alllwp, l_list) {
                KASSERT(l->l_proc != NULL);
                if (!swappable(l))
                        continue;
                switch (l->l_stat) {
                case LSONPROC:
                        continue;

                case LSRUN:
                        if (l->l_swtime > outpri2) {
                                outl2 = l;
                                outpri2 = l->l_swtime;
                        }
                        continue;

                case LSSLEEP:
                case LSSTOP:
                        if (l->l_slptime >= maxslp) {
                                uvm_swapout(l);
                                didswap++;
                        } else if (l->l_slptime > outpri) {
                                outl = l;
                                outpri = l->l_slptime;
                        }
                        continue;
                }
        }
        proclist_unlock_read();

        /*
         * If we didn't get rid of any real duds, toss out the next most
         * likely sleeping/stopped or running candidate.  We only do this
         * if we are real low on memory since we don't gain much by doing
         * it (USPACE bytes).
         */
        if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
                if ((l = outl) == NULL)
                        l = outl2;
#ifdef DEBUG
                if (swapdebug & SDB_SWAPOUT)
                        printf("swapout_threads: no duds, try procp %p\n", l);
#endif
                if (l)
                        uvm_swapout(l);
        }
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently, "swapout" means unwiring the U-area and calling
 *   pmap_collect() on the pmap.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(l)
        struct lwp *l;
{
        vaddr_t addr;
        int s;
        struct proc *p = l->l_proc;

#ifdef DEBUG
        if (swapdebug & SDB_SWAPOUT)
                printf("swapout: lid %d.%d(%s)@%p, stat %x slptime %d free %d\n",
                    p->p_pid, l->l_lid, p->p_comm, l->l_addr, l->l_stat,
                    l->l_slptime, uvmexp.free);
#endif

        /*
         * Mark it as (potentially) swapped out.
         */
        SCHED_LOCK(s);
        if (l->l_stat == LSONPROC) {
                KDASSERT(l->l_cpu != curcpu());
                SCHED_UNLOCK(s);
                return;
        }
        l->l_flag &= ~L_INMEM;
        if (l->l_stat == LSRUN)
                remrunqueue(l);
        SCHED_UNLOCK(s);
        l->l_swtime = 0;
        p->p_stats->p_ru.ru_nswap++;
        ++uvmexp.swapouts;

        /*
         * Do any machine-specific actions necessary before swapout.
         * This can include saving floating point state, etc.
         */
        cpu_swapout(l);

        /*
         * Unwire the to-be-swapped process's user struct and kernel stack.
         */
        addr = (vaddr_t)l->l_addr;
        uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */
        pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}
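
/*
 * Note: the pmap_collect() call above invites the pmap module to
 * discard whatever mapping state it can cheaply reconstruct for the
 * now-swapped process, reclaiming kernel resources; any discarded
 * mappings are recreated by faults when the process runs again.
 */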

/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(p, vp, cred, func, cookie)
        struct proc *p;
        struct vnode *vp;
        struct ucred *cred;
        int (*func)(struct proc *, struct vnode *, struct ucred *,
            struct uvm_coredump_state *);
        void *cookie;
{
        struct uvm_coredump_state state;
        struct vmspace *vm = p->p_vmspace;
        struct vm_map *map = &vm->vm_map;
        struct vm_map_entry *entry;
        int error;

        entry = NULL;
        vm_map_lock_read(map);
        for (;;) {
                if (entry == NULL)
                        entry = map->header.next;
                else if (!uvm_map_lookup_entry(map, state.end, &entry))
                        entry = entry->next;
                if (entry == &map->header)
                        break;

                state.cookie = cookie;
                state.start = entry->start;
                state.end = entry->end;
                state.prot = entry->protection;
                state.flags = 0;

                /*
                 * Dump the region unless one of the following is true:
                 *
                 * (1) the region has neither object nor amap behind it
                 *     (ie. it has never been accessed).
                 *
                 * (2) the region has no amap and is read-only
                 *     (eg. an executable text section).
                 *
                 * (3) the region's object is a device.
                 *
                 * (4) the region is unreadable by the process.
                 */

                KASSERT(!UVM_ET_ISSUBMAP(entry));
                KASSERT(state.start < VM_MAXUSER_ADDRESS);
                KASSERT(state.end <= VM_MAXUSER_ADDRESS);
                if (entry->object.uvm_obj == NULL &&
                    entry->aref.ar_amap == NULL) {
                        state.flags |= UVM_COREDUMP_NODUMP;
                }
                if ((entry->protection & VM_PROT_WRITE) == 0 &&
                    entry->aref.ar_amap == NULL) {
                        state.flags |= UVM_COREDUMP_NODUMP;
                }
                if (entry->object.uvm_obj != NULL &&
                    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
                        state.flags |= UVM_COREDUMP_NODUMP;
                }
                if ((entry->protection & VM_PROT_READ) == 0) {
                        state.flags |= UVM_COREDUMP_NODUMP;
                }
                if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
                        state.flags |= UVM_COREDUMP_STACK;
                }

                vm_map_unlock_read(map);
                error = (*func)(p, vp, cred, &state);
                if (error)
                        return (error);
                vm_map_lock_read(map);
        }
        vm_map_unlock_read(map);

        return (0);
}
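
/*
 * Note on the walk above: the map's read lock is dropped around the
 * (*func)() callback because writing the core file may sleep.  The
 * map can therefore change between iterations, so the loop does not
 * trust a cached entry pointer; it re-finds its position with
 * uvm_map_lookup_entry() on state.end after relocking.
 */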