FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_extern.h


/*      $OpenBSD: uvm_extern.h,v 1.166 2022/11/17 18:53:05 deraadt Exp $        */
/*      $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $      */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_extern.h 8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

typedef int vm_fault_t;

typedef int vm_inherit_t;       /* XXX: inheritance codes */
typedef off_t voff_t;           /* XXX: offset within a uvm_object */

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page  *vm_page_t;

/*
 * Bit assignments assigned by UVM_MAPFLAG() and extracted by
 * UVM_{PROTECTION,INHERIT,MAXPROTECTION,ADVICE}():
 * bits 0-2     protection
 * bit  3       unused
 * bits 4-5     inheritance
 * bits 6-7     unused
 * bits 8-10    max protection
 * bit  11      unused
 * bits 12-14   advice
 * bit  15      unused
 * bits 16-N    flags
 */

/* protection bits */
#define PROT_MASK       (PROT_READ | PROT_WRITE | PROT_EXEC)

/* inherit codes */
#define MAP_INHERIT_MASK        0x3     /* inherit mask */

typedef int             vm_prot_t;

#define MADV_MASK       0x7     /* mask */

/* mapping flags */
#define UVM_FLAG_FIXED   0x0010000 /* find space */
#define UVM_FLAG_OVERLAY 0x0020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x0040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x0080000 /* set copy_on_write flag */
#define UVM_FLAG_TRYLOCK 0x0100000 /* fail if we cannot lock map */
#define UVM_FLAG_HOLE    0x0200000 /* no backend */
#define UVM_FLAG_QUERY   0x0400000 /* do everything, except actual execution */
#define UVM_FLAG_NOFAULT 0x0800000 /* don't fault */
#define UVM_FLAG_UNMAP   0x1000000 /* unmap to make space */
#define UVM_FLAG_STACK   0x2000000 /* page may contain a stack */
#define UVM_FLAG_WC      0x4000000 /* write combining */
#define UVM_FLAG_CONCEAL 0x8000000 /* omit from dumps */
#define UVM_FLAG_SYSCALL 0x10000000 /* system calls allowed */
#define UVM_FLAG_SIGALTSTACK 0x20000000 /* sigaltstack validation required */

/* macros to extract info */
#define UVM_PROTECTION(X)       ((X) & PROT_MASK)
#define UVM_INHERIT(X)          (((X) >> 4) & MAP_INHERIT_MASK)
#define UVM_MAXPROTECTION(X)    (((X) >> 8) & PROT_MASK)
#define UVM_ADVICE(X)           (((X) >> 12) & MADV_MASK)

#define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \
        ((prot) | ((maxprot) << 8) | ((inh) << 4) | ((advice) << 12) | (flags))
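
/*
 * Illustrative usage sketch, not part of the original header: packing a
 * mapping-flag word with UVM_MAPFLAG() and recovering the fields with the
 * extraction macros above.  The particular protections, inherit code and
 * advice shown are assumptions chosen for the example.
 */
#if 0   /* example only */
        int mapflags = UVM_MAPFLAG(
            PROT_READ | PROT_WRITE,                     /* protection */
            PROT_READ | PROT_WRITE | PROT_EXEC,         /* max protection */
            MAP_INHERIT_COPY,                           /* inheritance */
            MADV_NORMAL,                                /* advice */
            UVM_FLAG_COPYONW);                          /* flags */

        /* each field comes back out of the packed word */
        vm_prot_t prot    = UVM_PROTECTION(mapflags);   /* PROT_READ|PROT_WRITE */
        vm_prot_t maxprot = UVM_MAXPROTECTION(mapflags);
        int inherit       = UVM_INHERIT(mapflags);      /* MAP_INHERIT_COPY */
        int advice        = UVM_ADVICE(mapflags);       /* MADV_NORMAL */
#endif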

/* magic offset value */
#define UVM_UNKNOWN_OFFSET ((voff_t) -1)
                                /* offset not known(obj) or don't care(!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT  0x1                     /* matches M_NOWAIT */
#define UVM_KMF_VALLOC  0x2                     /* allocate VA only */
#define UVM_KMF_CANFAIL 0x4                     /* caller handles failure */
#define UVM_KMF_ZERO    0x08                    /* zero pages */
#define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK        /* try locking only */

/*
 * flags for uvm_pagealloc()
 */
#define UVM_PGA_USERESERVE      0x0001  /* ok to use reserve pages */
#define UVM_PGA_ZERO            0x0002  /* returned page must be zeroed */

/*
 * flags for uvm_pglistalloc(), also used by uvm_pmr_getpages()
 */
#define UVM_PLA_WAITOK          0x0001  /* may sleep */
#define UVM_PLA_NOWAIT          0x0002  /* can't sleep (need one of the two) */
#define UVM_PLA_ZERO            0x0004  /* zero all pages before returning */
#define UVM_PLA_TRYCONTIG       0x0008  /* try to allocate contig physmem */
#define UVM_PLA_FAILOK          0x0010  /* caller can handle failure */
#define UVM_PLA_NOWAKE          0x0020  /* don't wake page daemon on failure */
#define UVM_PLA_USERESERVE      0x0040  /* can allocate from kernel reserve */

/*
 * lock flags that control the locking behavior of various functions.
 */
#define UVM_LK_ENTER    0x00000001      /* map locked on entry */
#define UVM_LK_EXIT     0x00000002      /* leave map locked on exit */

/*
 * flags to uvm_page_physload.
 */
#define PHYSLOAD_DEVICE 0x01    /* don't add to the page queue */

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/mman.h>

#ifdef _KERNEL
struct buf;
struct mount;
struct pglist;
struct vmspace;
struct pmap;
#endif

#include <uvm/uvm_param.h>

#include <uvm/uvm_pmap.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_map.h>

#ifdef _KERNEL
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>
#endif

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 *
 *  Locks used to protect struct members in this file:
 *      K       kernel lock
 *      I       immutable after creation
 *      v       vm_map's lock
 */
struct vmspace {
        struct  vm_map vm_map;  /* VM address map */
        int     vm_refcnt;      /* [K] number of references */
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
        segsz_t vm_rssize;      /* current resident set size in pages */
        segsz_t vm_swrss;       /* resident set size before last swap */
        segsz_t vm_tsize;       /* text size (pages) XXX */
        segsz_t vm_dsize;       /* data size (pages) XXX */
        segsz_t vm_dused;       /* data segment length (pages) XXX */
        segsz_t vm_ssize;       /* [v] stack size (pages) */
        caddr_t vm_taddr;       /* [I] user virtual address of text */
        caddr_t vm_daddr;       /* [I] user virtual address of data */
        caddr_t vm_maxsaddr;    /* [I] user VA at max stack growth */
        caddr_t vm_minsaddr;    /* [I] user VA at top of stack */
};
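
/*
 * Illustrative sketch, not part of the original header: the vm_startcopy
 * marker above lets fork-time code copy the statistics tail of the
 * structure in a single memcpy while vm_map, vm_refcnt and vm_shm are set
 * up explicitly.  A minimal sketch of that idiom, with vm1 as the parent's
 * vmspace and vm2 as the child's (both hypothetical here):
 */
#if 0   /* example only */
        memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
            (caddr_t)(vm1 + 1) - (caddr_t)&vm1->vm_startcopy);
#endif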

/*
 * uvm_constraint_range's:
 * MD code is allowed to set up constraint ranges for memory allocators; the
 * primary use for this is to keep allocations for certain memory consumers,
 * such as mbuf pools, within address ranges that are reachable by devices
 * that perform DMA.
 *
 * It is also used to discourage memory allocations from being satisfied from
 * ranges such as the ISA memory range, if they can be satisfied with
 * allocations from other ranges.
 *
 * The MD ranges are defined in arch/ARCH/ARCH/machdep.c
 */
struct uvm_constraint_range {
        paddr_t ucr_low;
        paddr_t ucr_high;
};
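
/*
 * Illustrative sketch, not part of the original header: an MD machdep.c
 * might describe a window reachable by 32-bit DMA devices like this.  The
 * bounds are assumptions for the example, not real platform values.
 */
#if 0   /* example only */
        struct uvm_constraint_range dma_constraint = {
                .ucr_low  = 0x0,
                .ucr_high = 0xffffffffUL,       /* devices address 4 GB */
        };
#endif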

#ifdef _KERNEL

#include <uvm/uvmexp.h>
extern struct uvmexp uvmexp;

/* Constraint ranges, set by MD code. */
extern struct uvm_constraint_range  isa_constraint;
extern struct uvm_constraint_range  dma_constraint;
extern struct uvm_constraint_range  no_constraint;
extern struct uvm_constraint_range *uvm_md_constraints[];

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;

/* base of kernel virtual memory */
extern vaddr_t vm_min_kernel_address;

/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,0,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,0,FALSE)

#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))

struct plimit;

void                    vmapbuf(struct buf *, vsize_t);
void                    vunmapbuf(struct buf *, vsize_t);
struct uvm_object       *uao_create(vsize_t, int);
void                    uao_detach(struct uvm_object *);
void                    uao_reference(struct uvm_object *);
int                     uvm_fault(vm_map_t, vaddr_t, vm_fault_t, vm_prot_t);

vaddr_t                 uvm_uarea_alloc(void);
void                    uvm_uarea_free(struct proc *);
void                    uvm_exit(struct process *);
void                    uvm_init_limits(struct plimit *);
boolean_t               uvm_kernacc(caddr_t, size_t, int);

int                     uvm_vslock(struct proc *, caddr_t, size_t,
                            vm_prot_t);
void                    uvm_vsunlock(struct proc *, caddr_t, size_t);
int                     uvm_vslock_device(struct proc *, void *, size_t,
                            vm_prot_t, void **);
void                    uvm_vsunlock_device(struct proc *, void *, size_t,
                            void *);
void                    uvm_pause(void);
void                    uvm_init(void);
void                    uvm_init_percpu(void);
int                     uvm_io(vm_map_t, struct uio *, int);

#define UVM_IO_FIXPROT  0x01

vaddr_t                 uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void                    uvm_km_free(vm_map_t, vaddr_t, vsize_t);
vaddr_t                 uvm_km_kmemalloc_pla(struct vm_map *,
                            struct uvm_object *, vsize_t, vsize_t, int,
                            paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags)                           \
        uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
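
/*
 * Illustrative usage sketch, not part of the original header: allocating
 * and freeing wired kernel memory through the uvm_km_kmemalloc() wrapper.
 * The flag combination is an assumption chosen for the example.
 */
#if 0   /* example only */
        vaddr_t va;

        /* zeroed allocation that fails rather than sleeping forever */
        va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
            UVM_KMF_NOWAIT | UVM_KMF_CANFAIL | UVM_KMF_ZERO);
        if (va != 0)
                uvm_km_free(kmem_map, va, PAGE_SIZE);
#endif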
struct vm_map           *uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
                            vsize_t, int, boolean_t, vm_map_t);
/*
 * Allocation mode for virtual space.
 *
 *  kv_map - pointer to the pointer to the map we're allocating from.
 *  kv_align - alignment.
 *  kv_wait - wait for free space in the map if it's full. The default
 *   allocators don't wait since running out of space in kernel_map and
 *   kmem_map is usually fatal. Special maps like exec_map are specifically
 *   limited, so waiting for space in them is necessary.
 *  kv_singlepage - use the single page allocator.
 *  kv_executable - map the physical pages with PROT_EXEC.
 */
struct kmem_va_mode {
        struct vm_map **kv_map;
        vsize_t kv_align;
        char kv_wait;
        char kv_singlepage;
};

/*
 * Allocation mode for physical pages.
 *
 *  kp_constraint - allocation constraint for physical pages.
 *  kp_object - if the pages should be allocated from an object.
 *  kp_align - physical alignment of the first page in the allocation.
 *  kp_boundary - boundary that the physical addresses can't cross if
 *   the allocation is contiguous.
 *  kp_nomem - don't allocate any backing pages.
 *  kp_maxseg - maximum number of contiguous segments.
 *  kp_zero - zero the returned memory.
 *  kp_pageable - allocate pageable memory.
 */
struct kmem_pa_mode {
        struct uvm_constraint_range *kp_constraint;
        struct uvm_object **kp_object;
        paddr_t kp_align;
        paddr_t kp_boundary;
        int kp_maxseg;
        char kp_nomem;
        char kp_zero;
        char kp_pageable;
};

/*
 * Dynamic allocation parameters. Stuff that changes too often or too much
 * to create separate va and pa modes for.
 *
 * kd_waitok - is it ok to sleep?
 * kd_trylock - don't sleep on map locks.
 * kd_prefer - offset to feed to PMAP_PREFER.
 * kd_slowdown - special parameter for the singlepage va allocator
 *  that tells the caller to sleep if possible to let the singlepage
 *  allocator catch up.
 */
struct kmem_dyn_mode {
        voff_t kd_prefer;
        int *kd_slowdown;
        char kd_waitok;
        char kd_trylock;
};

#define KMEM_DYN_INITIALIZER { UVM_UNKNOWN_OFFSET, NULL, 0, 0 }

/*
 * Notice that for kv_, waiting has a different meaning. It's only supposed
 * to be used for very space-constrained maps where waiting is a way
 * to throttle some other operation.
 * The exception is kv_page, which needs to wait relatively often.
 * All kv_ except kv_intrsafe will potentially sleep.
 */
extern const struct kmem_va_mode kv_any;
extern const struct kmem_va_mode kv_intrsafe;
extern const struct kmem_va_mode kv_page;

extern const struct kmem_pa_mode kp_dirty;
extern const struct kmem_pa_mode kp_zero;
extern const struct kmem_pa_mode kp_dma;
extern const struct kmem_pa_mode kp_dma_contig;
extern const struct kmem_pa_mode kp_dma_zero;
extern const struct kmem_pa_mode kp_pageable;
extern const struct kmem_pa_mode kp_none;

extern const struct kmem_dyn_mode kd_waitok;
extern const struct kmem_dyn_mode kd_nowait;
extern const struct kmem_dyn_mode kd_trylock;

void                    *km_alloc(size_t, const struct kmem_va_mode *,
                            const struct kmem_pa_mode *,
                            const struct kmem_dyn_mode *);
void                    km_free(void *, size_t, const struct kmem_va_mode *,
                            const struct kmem_pa_mode *);
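
/*
 * Illustrative usage sketch, not part of the original header: the kv_/kp_/
 * kd_ mode structures above are combined at the km_alloc() call site.  A
 * sleeping allocation of one zeroed, DMA-reachable page might look like
 * this (the mode choices are assumptions for the example).
 */
#if 0   /* example only */
        void *p;

        p = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero, &kd_waitok);
        if (p == NULL)
                panic("km_alloc: kd_waitok allocation failed");

        km_free(p, PAGE_SIZE, &kv_any, &kp_dma_zero);
#endif
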
int                     uvm_map(vm_map_t, vaddr_t *, vsize_t,
                            struct uvm_object *, voff_t, vsize_t, unsigned int);
int                     uvm_mapanon(vm_map_t, vaddr_t *, vsize_t, vsize_t,
                            unsigned int);
int                     uvm_map_pageable(vm_map_t, vaddr_t,
                            vaddr_t, boolean_t, int);
int                     uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t               uvm_map_checkprot(vm_map_t, vaddr_t,
                            vaddr_t, vm_prot_t);
int                     uvm_map_protect(vm_map_t, vaddr_t,
                            vaddr_t, vm_prot_t, int etype, boolean_t, boolean_t);
struct vmspace          *uvmspace_alloc(vaddr_t, vaddr_t,
                            boolean_t, boolean_t);
void                    uvmspace_init(struct vmspace *, struct pmap *,
                            vaddr_t, vaddr_t, boolean_t, boolean_t);
void                    uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace          *uvmspace_fork(struct process *);
void                    uvmspace_addref(struct vmspace *);
void                    uvmspace_free(struct vmspace *);
struct vmspace          *uvmspace_share(struct process *);
int                     uvm_share(vm_map_t, vaddr_t, vm_prot_t,
                            vm_map_t, vaddr_t, vsize_t);
void                    uvm_meter(void);
int                     uvm_sysctl(int *, u_int, void *, size_t *,
                            void *, size_t, struct proc *);
struct vm_page          *uvm_pagealloc(struct uvm_object *,
                            voff_t, struct vm_anon *, int);
int                     uvm_pagealloc_multi(struct uvm_object *, voff_t,
                            vsize_t, int);
void                    uvm_pagerealloc(struct vm_page *,
                            struct uvm_object *, voff_t);
int                     uvm_pagerealloc_multi(struct uvm_object *, voff_t,
                            vsize_t, int, struct uvm_constraint_range *);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void                    uvm_page_physload(paddr_t, paddr_t, paddr_t,
                            paddr_t, int);
void                    uvm_setpagesize(void);
void                    uvm_shutdown(void);
void                    uvm_aio_biodone(struct buf *);
void                    uvm_aio_aiodone(struct buf *);
void                    uvm_pageout(void *);
void                    uvm_aiodone_daemon(void *);
void                    uvm_wait(const char *);
int                     uvm_pglistalloc(psize_t, paddr_t, paddr_t,
                            paddr_t, paddr_t, struct pglist *, int, int);
void                    uvm_pglistfree(struct pglist *);
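
/*
 * Illustrative usage sketch, not part of the original header: gathering
 * physically contiguous, zeroed pages below 4 GB with uvm_pglistalloc()
 * and the UVM_PLA_* flags defined earlier.  Sizes and bounds are
 * assumptions for the example.
 */
#if 0   /* example only */
        struct pglist pgl;
        int error;

        TAILQ_INIT(&pgl);
        error = uvm_pglistalloc(4 * PAGE_SIZE,          /* size */
            0, 0xffffffffUL,                            /* low, high */
            PAGE_SIZE, 0,                               /* alignment, boundary */
            &pgl, 1,                                    /* nsegs: one segment */
            UVM_PLA_WAITOK | UVM_PLA_ZERO);
        if (error == 0)
                uvm_pglistfree(&pgl);
#endif
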
void                    uvm_pmr_use_inc(paddr_t, paddr_t);
void                    uvm_swap_init(void);
typedef int             uvm_coredump_setup_cb(int _nsegment, void *_cookie);
typedef int             uvm_coredump_walk_cb(vaddr_t _start, vaddr_t _realend,
                            vaddr_t _end, vm_prot_t _prot, int _nsegment,
                            void *_cookie);
int                     uvm_coredump_walkmap(struct proc *_p,
                            uvm_coredump_setup_cb *_setup,
                            uvm_coredump_walk_cb *_walk, void *_cookie);
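
/*
 * Illustrative sketch, not part of the original header: hypothetical
 * callbacks for uvm_coredump_walkmap().  The setup callback learns the
 * total segment count; the walk callback sees each mapped range, where
 * [_realend, _end) is a hole that can be recorded without file contents.
 */
#if 0   /* example only */
static int
dump_setup(int nsegment, void *cookie)
{
        /* size the core file headers for nsegment segments */
        return (0);
}

static int
dump_walk(vaddr_t start, vaddr_t realend, vaddr_t end,
    vm_prot_t prot, int nsegment, void *cookie)
{
        /* write [start, realend) for this segment to the core file */
        return (0);
}

/* error = uvm_coredump_walkmap(p, dump_setup, dump_walk, cookie); */
#endif
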
void                    uvm_grow(struct proc *, vaddr_t);
void                    uvm_pagezero_thread(void *);
void                    kmeminit_nkmempages(void);
void                    kmeminit(void);
extern u_int            nkmempages;

struct vnode;
struct uvm_object       *uvn_attach(struct vnode *, vm_prot_t);

struct process;
struct kinfo_vmentry;
int                     fill_vmmap(struct process *, struct kinfo_vmentry *,
                            size_t *);

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */
