FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_extern.h


    1 /*      $NetBSD: uvm_extern.h,v 1.232 2021/05/31 10:57:02 riastradh Exp $       */
    2 
    3 /*
    4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   26  *
   27  * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
   28  */
   29 
   30 /*-
   31  * Copyright (c) 1991, 1992, 1993
   32  *      The Regents of the University of California.  All rights reserved.
   33  *
   34  * Redistribution and use in source and binary forms, with or without
   35  * modification, are permitted provided that the following conditions
   36  * are met:
   37  * 1. Redistributions of source code must retain the above copyright
   38  *    notice, this list of conditions and the following disclaimer.
   39  * 2. Redistributions in binary form must reproduce the above copyright
   40  *    notice, this list of conditions and the following disclaimer in the
   41  *    documentation and/or other materials provided with the distribution.
   42  * 3. Neither the name of the University nor the names of its contributors
   43  *    may be used to endorse or promote products derived from this software
   44  *    without specific prior written permission.
   45  *
   46  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   56  * SUCH DAMAGE.
   57  *
   58  *      @(#)vm_extern.h 8.5 (Berkeley) 5/3/95
   59  */
   60 
   61 #ifndef _UVM_UVM_EXTERN_H_
   62 #define _UVM_UVM_EXTERN_H_
   63 
   64 /*
   65  * uvm_extern.h: this file defines the external interface to the VM system.
   66  *
   67  * this should be the only file included by non-VM parts of the kernel
   68  * which need access to VM services.   if you want to know the interface
   69  * to the MI VM layer without knowing the details, this is the file to
   70  * learn.
   71  *
   72  * NOTE: vm system calls are prototyped in syscallargs.h
   73  */
   74 
   75 #include <sys/types.h>
   76 
   77 /*
   78  * defines
   79  */
   80 
   81 /*
   82  * the following defines are for uvm_map and functions which call it.
   83  */
   84 
   85 /* protections bits */
   86 #define UVM_PROT_MASK   0x07    /* protection mask */
   87 #define UVM_PROT_NONE   0x00    /* protection none */
   88 #define UVM_PROT_ALL    0x07    /* everything */
   89 #define UVM_PROT_READ   0x01    /* read */
   90 #define UVM_PROT_WRITE  0x02    /* write */
   91 #define UVM_PROT_EXEC   0x04    /* exec */
   92 
   93 /* protection short codes */
   94 #define UVM_PROT_R      0x01    /* read */
   95 #define UVM_PROT_W      0x02    /* write */
   96 #define UVM_PROT_RW     0x03    /* read-write */
   97 #define UVM_PROT_X      0x04    /* exec */
   98 #define UVM_PROT_RX     0x05    /* read-exec */
   99 #define UVM_PROT_WX     0x06    /* write-exec */
  100 #define UVM_PROT_RWX    0x07    /* read-write-exec */
  101 
  102 /* 0x08: not used */
  103 
  104 /* inherit codes */
  105 #define UVM_INH_MASK    0x30    /* inherit mask */
  106 #define UVM_INH_SHARE   0x00    /* "share" */
  107 #define UVM_INH_COPY    0x10    /* "copy" */
  108 #define UVM_INH_NONE    0x20    /* "none" */
  109 #define UVM_INH_DONATE  0x30    /* "donate" << not used */
  110 
  111 /* 0x40, 0x80: not used */
  112 
  113 /* bits 0x700: max protection, 0x800: not used */
  114 
  115 /* bits 0x7000: advice, 0x8000: not used */
  116 /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */
  117 #define UVM_ADV_NORMAL  0x0     /* 'normal' */
  118 #define UVM_ADV_RANDOM  0x1     /* 'random' */
  119 #define UVM_ADV_SEQUENTIAL 0x2  /* 'sequential' */
  120 #define UVM_ADV_WILLNEED 0x3    /* pages will be needed */
  121 #define UVM_ADV_DONTNEED 0x4    /* pages won't be needed */
  122 #define UVM_ADV_NOREUSE 0x5     /* pages will be used only once */
  123 #define UVM_ADV_MASK    0x7     /* mask */
  124 
  125 /* bits 0xffff0000: mapping flags */
  126 #define UVM_FLAG_FIXED          0x00010000 /* find space */
  127 #define UVM_FLAG_OVERLAY        0x00020000 /* establish overlay */
  128 #define UVM_FLAG_NOMERGE        0x00040000 /* don't merge map entries */
  129 #define UVM_FLAG_COPYONW        0x00080000 /* set copy_on_write flag */
  130 #define UVM_FLAG_AMAPPAD        0x00100000 /* for bss: pad amap */
   131 #define UVM_FLAG_TRYLOCK        0x00200000 /* fail if we cannot lock map */
  132 #define UVM_FLAG_NOWAIT         0x00400000 /* not allowed to sleep */
  133 #define UVM_FLAG_WAITVA         0x00800000 /* wait for va */
  134 #define UVM_FLAG_VAONLY         0x02000000 /* unmap: no pages are mapped */
  135 #define UVM_FLAG_COLORMATCH     0x04000000 /* match color given in off */
  136 #define UVM_FLAG_UNMAP          0x08000000 /* unmap existing entries */
  137 
  138 #define UVM_FLAG_BITS "\177\020\
  139 F\0\3\
  140 :\0PROT=NONE\0\
  141 :\1PROT=R\0\
  142 :\2PROT=W\0\
  143 :\3PROT=RW\0\
  144 :\4PROT=X\0\
  145 :\5PROT=RX\0\
  146 :\6PROT=WX\0\
  147 :\7PROT=RWX\0\
  148 F\4\2\
  149 :\0INH=SHARE\0\
  150 :\1INH=COPY\0\
  151 :\2INH=NONE\0\
  152 :\3INH=DONATE\0\
  153 F\1\3\
  154 :\0MAXPROT=NONE\0\
  155 :\1MAXPROT=R\0\
  156 :\2MAXPROT=W\0\
  157 :\3MAXPROT=RW\0\
  158 :\4MAXPROT=X\0\
  159 :\5MAXPROT=RX\0\
  160 :\6MAXPROT=WX\0\
  161 :\7MAXPROT=RWX\0\
  162 F\14\3\
  163 :\0ADV=NORMAL\0\
  164 :\1ADV=RANDOM\0\
  165 :\2ADV=SEQUENTIAL\0\
  166 :\3ADV=WILLNEED\0\
  167 :\4ADV=DONTNEED\0\
  168 :\5ADV=NOREUSE\0\
  169 b\20FIXED\0\
  170 b\21OVERLAY\0\
  171 b\22NOMERGE\0\
  172 b\23COPYONW\0\
  173 b\24AMAPPAD\0\
  174 b\25TRYLOCK\0\
  175 b\26NOWAIT\0\
  176 b\27WAITVA\0\
  177 b\30VAONLY\0\
  178 b\31COLORMATCH\0\
  179 b\32UNMAP\0\
  180 "
  181 
  182 /* macros to extract info */
  183 #define UVM_PROTECTION(X)       ((X) & UVM_PROT_MASK)
  184 #define UVM_INHERIT(X)          (((X) & UVM_INH_MASK) >> 4)
  185 #define UVM_MAXPROTECTION(X)    (((X) >> 8) & UVM_PROT_MASK)
  186 #define UVM_ADVICE(X)           (((X) >> 12) & UVM_ADV_MASK)
  187 
  188 #define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
  189         (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
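
UVM_MAPFLAG() packs the protection (bits 0-2), inheritance (bits 4-5), maximum protection (bits 8-10), advice (bits 12-14) and the high-half mapping flags into the single flag word passed as the last argument of uvm_map(9), and the extraction macros above recover each field. A minimal sketch (illustrative, not part of the header) of composing and unpacking such a word; KASSERT() is the usual kernel assertion macro:

    /* Illustrative only: pack the flag word for a copy-on-write mapping. */
    unsigned int mapflags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL,
        UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_COPYONW);

    /* The extraction macros recover each field. */
    KASSERT(UVM_PROTECTION(mapflags) == UVM_PROT_RW);       /* 0x03 */
    KASSERT(UVM_MAXPROTECTION(mapflags) == UVM_PROT_ALL);   /* 0x07 */
    KASSERT(UVM_INHERIT(mapflags) == (UVM_INH_COPY >> 4));  /* 1 */
    KASSERT(UVM_ADVICE(mapflags) == UVM_ADV_NORMAL);        /* 0 */
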
  190 
  191 /* magic offset value: offset not known(obj) or don't care(!obj) */
  192 #define UVM_UNKNOWN_OFFSET ((voff_t) -1)
  193 
  194 /*
  195  * the following defines are for uvm_km_alloc/free's flags
  196  */
  197 #define UVM_KMF_WIRED   0x1                     /* allocation type: wired */
  198 #define UVM_KMF_PAGEABLE 0x2                    /* allocation type: pageable */
  199 #define UVM_KMF_VAONLY  0x4                     /* allocation type: VA only */
  200 #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED)
  201 #define UVM_KMF_CANFAIL 0x8                     /* caller handles failure */
  202 #define UVM_KMF_ZERO    0x10                    /* want zero filled memory */
  203 #define UVM_KMF_EXEC    0x20                    /* need executable mapping */
  204 #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK        /* try locking only */
  205 #define UVM_KMF_NOWAIT  UVM_FLAG_NOWAIT         /* not allowed to sleep */
  206 #define UVM_KMF_WAITVA  UVM_FLAG_WAITVA         /* sleep for va */
  207 #define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH  /* start at color in align */
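
These bits select the allocation type (one of WIRED, PAGEABLE, VAONLY) and the failure and sleep behaviour of uvm_km_alloc(), whose prototype appears later in this header; the type flag is repeated on free. A minimal sketch (illustrative, not part of the header) of a wired, zero-filled allocation that is allowed to fail; kernel_map is declared below, and `len` is a caller-supplied placeholder:

    /* Illustrative only: wired, zeroed kernel memory that may fail. */
    vaddr_t va;

    va = uvm_km_alloc(kernel_map, round_page(len), 0,
        UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
    if (va == 0)
            return ENOMEM;
    /* ... use the memory ... */
    uvm_km_free(kernel_map, va, round_page(len), UVM_KMF_WIRED);
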
  208 
  209 /*
  210  * the following defines the strategies for uvm_pagealloc_strat()
  211  */
  212 #define UVM_PGA_STRAT_NORMAL    0       /* priority (low id to high) walk */
  213 #define UVM_PGA_STRAT_ONLY      1       /* only specified free list */
  214 #define UVM_PGA_STRAT_FALLBACK  2       /* ONLY falls back on NORMAL */
  215 #define UVM_PGA_STRAT_NUMA      3       /* strongly prefer ideal bucket */
  216 
  217 /*
  218  * flags for uvm_pagealloc_strat()
  219  */
  220 #define UVM_PGA_USERESERVE      0x0001  /* ok to use reserve pages */
  221 #define UVM_PGA_ZERO            0x0002  /* returned page must be zero'd */
  222 
  223 /*
  224  * flags for ubc_uiomove()
  225  */
  226 #define UBC_READ        0x001   /* reading from object */
  227 #define UBC_WRITE       0x002   /* writing to object */
  228 #define UBC_FAULTBUSY   0x004   /* nobody else is using these pages, so busy
  229                                  * them at alloc and unbusy at release (e.g.,
  230                                  * for writes extending a file) */
  231 #define UBC_ISMAPPED    0x008   /* object may be mapped by a process */
  232 
  233 /*
  234  * flags for ubc_release()
  235  */
  236 #define UBC_UNMAP       0x010   /* unmap pages now -- don't leave the
  237                                  * mappings cached indefinitely */
  238 
  239 /*
  240  * flags for ubc_uiomove()
  241  */
  242 #define UBC_PARTIALOK   0x100   /* return early on error; otherwise, zero all
  243                                  * remaining bytes after error */
  244 
  245 /*
  246  * flags for uvn_findpages().
  247  */
  248 #define UFP_ALL         0x00
  249 #define UFP_NOWAIT      0x01
  250 #define UFP_NOALLOC     0x02
  251 #define UFP_NOCACHE     0x04
  252 #define UFP_NORDONLY    0x08
  253 #define UFP_DIRTYONLY   0x10
  254 #define UFP_BACKWARD    0x20
  255 #define UFP_NOBUSY      0x40
  256 
  257 /*
  258  * lockflags that control the locking behavior of various functions.
  259  */
  260 #define UVM_LK_ENTER    0x00000001      /* map locked on entry */
  261 #define UVM_LK_EXIT     0x00000002      /* leave map locked on exit */
  262 
  263 /*
  264  * Default number of pages to allocate on the stack
  265  */
  266 #define UBC_MAX_PAGES   8
  267 
  268 /*
  269  * Value representing inactive emap.
  270  */
  271 #define UVM_EMAP_INACTIVE       (0)
  272 
  273 /*
  274  * structures
  275  */
  276 
  277 struct buf;
  278 struct core;
  279 struct loadavg;
  280 struct mount;
  281 struct pglist;
  282 struct proc;
  283 struct uio;
  284 struct uvm_object;
  285 struct vm_anon;
  286 struct vmspace;
  287 struct pmap;
  288 struct vnode;
  289 struct vm_map_entry;
  290 struct vm_map;
  291 struct vm_page;
  292 struct vmtotal;
  293 
  294 /*
  295  * uvm_pctparam: parameter to be shown as percentage to user.
  296  */
  297 
  298 #define UVM_PCTPARAM_SHIFT      8
  299 #define UVM_PCTPARAM_SCALE      (1 << UVM_PCTPARAM_SHIFT)
  300 #define UVM_PCTPARAM_APPLY(pct, x) \
  301         (((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT)
  302 struct uvm_pctparam {
  303         int pct_pct;    /* percent [0, 100] */ /* should be the first member */
  304         int pct_scaled;
  305         int (*pct_check)(struct uvm_pctparam *, int);
  306 };
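
The structure keeps both the user-visible percentage and a value pre-scaled by UVM_PCTPARAM_SCALE, so UVM_PCTPARAM_APPLY() needs only a multiply and a shift instead of a division. A worked sketch (illustrative, not part of the header), assuming uvm_pctparam_init()/uvm_pctparam_set() maintain pct_scaled as pct * UVM_PCTPARAM_SCALE / 100; the `filemin` name is invented for the example:

    /* Illustrative only: take 25% of 1000 pages with the fixed-point helpers. */
    struct uvm_pctparam filemin;

    uvm_pctparam_init(&filemin, 25, NULL);           /* pct_scaled = 25 * 256 / 100 = 64 */
    int pages = UVM_PCTPARAM_APPLY(&filemin, 1000);  /* (1000 * 64) >> 8 = 250 */
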
  307 
  308 /*
  309  * uvmexp: global data structures that are exported to parts of the kernel
  310  * other than the vm system.
  311  */
  312 
  313 struct uvmexp {
  314         /* vm_page constants */
  315         int pagesize;   /* size of a page (PAGE_SIZE): must be power of 2 */
  316         int pagemask;   /* page mask */
  317         int pageshift;  /* page shift */
  318 
  319         /* vm_page counters */
  320         int npages;     /* number of pages we manage */
  321         int free;       /* number of free pages */
  322         int paging;     /* number of pages in the process of being paged out */
  323         int wired;      /* number of wired pages */
  324 
  325         /*
  326          * Adding anything before this line will break binary compatibility
  327          * with top(1) on NetBSD 1.5.
  328          */
  329 
  330         int ncolors;    /* number of page color buckets: must be p-o-2 */
  331         int colormask;  /* color bucket mask */
  332 
  333         int zeropages;          /* number of zero'd pages */
  334         int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
  335         int reserve_kernel;     /* number of pages reserved for kernel */
  336         unsigned anonpages;     /* number of pages used by anon mappings */
  337         unsigned filepages;     /* number of pages used by cached file data */
  338         unsigned execpages;     /* number of pages used by cached exec data */
  339 
  340         /* pageout params */
  341         int freemin;    /* min number of free pages */
  342         int freetarg;   /* target number of free pages */
  343         int wiredmax;   /* max number of wired pages */
  344 
  345         /* swap */
  346         int nswapdev;   /* number of configured swap devices in system */
  347         int swpages;    /* number of PAGE_SIZE'ed swap pages */
  348         int swpgavail;  /* number of swap pages currently available */
  349         int swpginuse;  /* number of swap pages in use */
  350         int swpgonly;   /* number of swap pages in use, not also in RAM */
  351         int nswget;     /* number of times fault calls uvm_swap_get() */
  352 
  353         /* stat counters.  XXX: should be 64-bit counters */
  354         int faults;             /* page fault count */
  355         int traps;              /* trap count */
  356         int intrs;              /* interrupt count */
  357         int swtch;              /* context switch count */
  358         int softs;              /* software interrupt count */
  359         int syscalls;           /* system calls */
  360         int pageins;            /* pagein operation count */
  361                                 /* pageouts are in pdpageouts below */
  362         int _unused1;
  363         int _unused2;
  364         int pgswapin;           /* pages swapped in */
  365         int pgswapout;          /* pages swapped out */
  366         int forks;              /* forks */
  367         int forks_ppwait;       /* forks where parent waits */
  368         int forks_sharevm;      /* forks where vmspace is shared */
  369         int pga_zerohit;        /* pagealloc where zero wanted and zero
  370                                    was available */
  371         int pga_zeromiss;       /* pagealloc where zero wanted and zero
  372                                    not available */
  373         int zeroaborts;         /* number of times page zeroing was
  374                                    aborted */
  375         int colorhit;           /* pagealloc where we got optimal color */
  376         int colormiss;          /* pagealloc where we didn't */
  377         int cpuhit;             /* pagealloc where we allocated locally */
  378         int cpumiss;            /* pagealloc where we didn't */
  379 
  380         /* fault subcounters.  XXX: should be 64-bit counters */
  381         int fltnoram;   /* number of times fault was out of ram */
  382         int fltnoanon;  /* number of times fault was out of anons */
  383         int fltpgwait;  /* number of times fault had to wait on a page */
  384         int fltpgrele;  /* number of times fault found a released page */
  385         int fltrelck;   /* number of times fault relock called */
  386         int fltrelckok; /* number of times fault relock is a success */
  387         int fltanget;   /* number of times fault gets anon page */
   388         int fltanretry; /* number of times fault retries an anon get */
  389         int fltamcopy;  /* number of times fault clears "needs copy" */
  390         int fltnamap;   /* number of times fault maps a neighbor anon page */
  391         int fltnomap;   /* number of times fault maps a neighbor obj page */
  392         int fltlget;    /* number of times fault does a locked pgo_get */
  393         int fltget;     /* number of times fault does an unlocked get */
  394         int flt_anon;   /* number of times fault anon (case 1a) */
  395         int flt_acow;   /* number of times fault anon cow (case 1b) */
  396         int flt_obj;    /* number of times fault is on object page (2a) */
  397         int flt_prcopy; /* number of times fault promotes with copy (2b) */
  398         int flt_przero; /* number of times fault promotes with zerofill (2b) */
  399 
  400         /* daemon counters.  XXX: should be 64-bit counters */
  401         int pdwoke;     /* number of times daemon woke up */
  402         int pdrevs;     /* number of times daemon rev'd clock hand */
  403         int _unused3;
  404         int pdfreed;    /* number of pages daemon freed since boot */
  405         int pdscans;    /* number of pages daemon scanned since boot */
  406         int pdanscan;   /* number of anonymous pages scanned by daemon */
  407         int pdobscan;   /* number of object pages scanned by daemon */
  408         int pdreact;    /* number of pages daemon reactivated since boot */
  409         int pdbusy;     /* number of times daemon found a busy page */
  410         int pdpageouts; /* number of times daemon started a pageout */
   411         int pdpending;  /* number of times daemon got a pending pageout */
  412         int pddeact;    /* number of pages daemon deactivates */
  413         int pdreanon;   /* anon pages reactivated due to thresholds */
  414         int pdrefile;   /* file pages reactivated due to thresholds */
  415         int pdreexec;   /* executable pages reactivated due to thresholds */
  416 
  417         int bootpages;  /* number of pages stolen at boot */
  418 };
  419 
  420 /*
  421  * The following structure is 64-bit alignment safe.  New elements
  422  * should only be added to the end of this structure so binary
  423  * compatibility can be preserved.
  424  */
  425 struct uvmexp_sysctl {
  426         int64_t pagesize;
  427         int64_t pagemask;
  428         int64_t pageshift;
  429         int64_t npages;
  430         int64_t free;
  431         int64_t active;
  432         int64_t inactive;
  433         int64_t paging;
  434         int64_t wired;
  435         int64_t zeropages;
  436         int64_t reserve_pagedaemon;
  437         int64_t reserve_kernel;
  438         int64_t freemin;
  439         int64_t freetarg;
  440         int64_t inactarg;               /* unused */
  441         int64_t wiredmax;
  442         int64_t nswapdev;
  443         int64_t swpages;
  444         int64_t swpginuse;
  445         int64_t swpgonly;
  446         int64_t nswget;
  447         int64_t unused1;                /* unused; was nanon */
  448         int64_t cpuhit;
  449         int64_t cpumiss;
  450         int64_t faults;
  451         int64_t traps;
  452         int64_t intrs;
  453         int64_t swtch;
  454         int64_t softs;
  455         int64_t syscalls;
  456         int64_t pageins;
  457         int64_t swapins;                /* unused */
  458         int64_t swapouts;               /* unused */
  459         int64_t pgswapin;               /* unused */
  460         int64_t pgswapout;
  461         int64_t forks;
  462         int64_t forks_ppwait;
  463         int64_t forks_sharevm;
  464         int64_t pga_zerohit;
  465         int64_t pga_zeromiss;
  466         int64_t zeroaborts;
  467         int64_t fltnoram;
  468         int64_t fltnoanon;
  469         int64_t fltpgwait;
  470         int64_t fltpgrele;
  471         int64_t fltrelck;
  472         int64_t fltrelckok;
  473         int64_t fltanget;
  474         int64_t fltanretry;
  475         int64_t fltamcopy;
  476         int64_t fltnamap;
  477         int64_t fltnomap;
  478         int64_t fltlget;
  479         int64_t fltget;
  480         int64_t flt_anon;
  481         int64_t flt_acow;
  482         int64_t flt_obj;
  483         int64_t flt_prcopy;
  484         int64_t flt_przero;
  485         int64_t pdwoke;
  486         int64_t pdrevs;
  487         int64_t unused4;
  488         int64_t pdfreed;
  489         int64_t pdscans;
  490         int64_t pdanscan;
  491         int64_t pdobscan;
  492         int64_t pdreact;
  493         int64_t pdbusy;
  494         int64_t pdpageouts;
  495         int64_t pdpending;
  496         int64_t pddeact;
  497         int64_t anonpages;
  498         int64_t filepages;
  499         int64_t execpages;
  500         int64_t colorhit;
  501         int64_t colormiss;
  502         int64_t ncolors;
  503         int64_t bootpages;
  504         int64_t poolpages;
  505         int64_t countsyncone;
  506         int64_t countsyncall;
  507         int64_t anonunknown;
  508         int64_t anonclean;
  509         int64_t anondirty;
  510         int64_t fileunknown;
  511         int64_t fileclean;
  512         int64_t filedirty;
  513         int64_t fltup;
  514         int64_t fltnoup;
  515 };
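
Because fields are only appended, a userland consumer can fetch this structure with the vm.uvmexp2 sysctl node and keep working across kernel versions. A minimal userland sketch (illustrative, not part of the header), assuming NetBSD's vm.uvmexp2 node exports struct uvmexp_sysctl and that this header is usable from userland, as tools such as vmstat(1) assume:

    /* Illustrative only: read the exported VM counters from userland. */
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <uvm/uvm_extern.h>
    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
            struct uvmexp_sysctl u;
            size_t len = sizeof(u);

            if (sysctlbyname("vm.uvmexp2", &u, &len, NULL, 0) == -1)
                    return 1;
            printf("free: %" PRId64 " pages, file: %" PRId64 " pages\n",
                u.free, u.filepages);
            return 0;
    }
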
  516 
  517 #ifdef _KERNEL
  518 /* we need this before including uvm_page.h on some platforms */
  519 extern struct uvmexp uvmexp;
  520 #endif
  521 
  522 /*
  523  * Finally, bring in standard UVM headers.
  524  */
  525 #include <sys/vmmeter.h>
  526 #include <sys/queue.h>
  527 #include <sys/lock.h>
  528 #ifdef _KERNEL
  529 #include <sys/vmem.h>
  530 #endif
  531 #include <uvm/uvm_param.h>
  532 #include <uvm/uvm_prot.h>
  533 #include <uvm/uvm_pmap.h>
  534 #if defined(_KERNEL) || defined(_KMEMUSER)
  535 #include <uvm/uvm_map.h>
  536 #include <uvm/uvm_pager.h>
  537 #endif
  538 
  539 #ifdef _KERNEL
  540 /*
  541  * Include the uvm_hotplug(9) API unconditionally until
   542  * uvm_page_physload() et al. are obsoleted
  543  *
  544  * After this, MD code will have to explicitly include it if needed.
  545  */
  546 #include <uvm/uvm_physseg.h> 
  547 #endif
  548 
  549 /*
  550  * helpers for calling ubc_release()
  551  */
  552 #ifdef PMAP_CACHE_VIVT
  553 #define UBC_VNODE_FLAGS(vp) \
  554     ((((vp)->v_iflag & VI_TEXT) != 0 ? UBC_UNMAP : 0) | \
  555     (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0))
  556 #else
  557 #define UBC_VNODE_FLAGS(vp) \
  558     (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0)
  559 #endif
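
UBC_VNODE_FLAGS() gathers the per-vnode bits (UBC_UNMAP for text vnodes on virtually-indexed caches, UBC_ISMAPPED when the vnode is mmapped) that accompany UBC_READ/UBC_WRITE in ubc_uiomove() calls. A hedged sketch (illustrative, not part of the header) of the common write-path pattern; vp, uio, bytes and the advice value are placeholders rather than canonical file-system code:

    /* Illustrative only: copy data from a uio into a vnode's pages via UBC. */
    error = ubc_uiomove(&vp->v_uobj, uio, bytes, UVM_ADV_RANDOM,
        UBC_WRITE | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
    if (error)
            return error;
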
  560 
  561 #if defined(_KERNEL) || defined(_KMEMUSER)
  562 /*
  563  * Shareable process virtual address space.
  564  * May eventually be merged with vm_map.
  565  * Several fields are temporary (text, data stuff).
  566  */
  567 struct vmspace {
  568         struct  vm_map vm_map;  /* VM address map */
  569         volatile int vm_refcnt; /* number of references */
  570         void *  vm_shm;         /* SYS5 shared memory private data XXX */
  571 /* we copy from vm_startcopy to the end of the structure on fork */
  572 #define vm_startcopy vm_rssize
  573         segsz_t vm_rssize;      /* current resident set size in pages */
  574         segsz_t vm_rssmax;      /* max resident size in pages */
  575         segsz_t vm_tsize;       /* text size (pages) XXX */
  576         segsz_t vm_dsize;       /* data size (pages) XXX */
  577         segsz_t vm_ssize;       /* stack size (pages) */
  578         segsz_t vm_issize;      /* initial unmapped stack size (pages) */
  579         void *  vm_taddr;       /* user virtual address of text XXX */
  580         void *  vm_daddr;       /* user virtual address of data XXX */
  581         void *vm_maxsaddr;      /* user VA at max stack growth */
  582         void *vm_minsaddr;      /* user VA at top of stack */
  583         size_t vm_aslr_delta_mmap;      /* mmap() random delta for ASLR */
  584 };
  585 #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map)
  586 #endif
  587 
  588 #ifdef _KERNEL
  589 
  590 /*
  591  * used to keep state while iterating over the map for a core dump.
  592  */
  593 struct uvm_coredump_state {
  594         void *cookie;           /* opaque for the caller */
  595         vaddr_t start;          /* start of region */
  596         vaddr_t realend;        /* real end of region */
  597         vaddr_t end;            /* virtual end of region */
  598         vm_prot_t prot;         /* protection of region */
  599         int flags;              /* flags; see below */
  600 };
  601 
  602 #define UVM_COREDUMP_STACK      0x01    /* region is user stack */
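
uvm_coredump_walkmap(), declared later in this file, calls a caller-supplied function once per dumpable region with one of these records, carrying the caller's opaque pointer in the cookie field. A hedged sketch (illustrative, not part of the header) of a callback that counts writable non-stack regions, assuming the conventional return-zero-to-continue protocol; the names are invented for the example:

    /* Illustrative only: count writable, non-stack regions during a walk. */
    static int
    count_writable_cb(struct uvm_coredump_state *cs)
    {
            int *np = cs->cookie;

            if ((cs->prot & VM_PROT_WRITE) != 0 &&
                (cs->flags & UVM_COREDUMP_STACK) == 0)
                    (*np)++;
            return 0;       /* a nonzero return would abort the walk */
    }

    /* caller: int n = 0; error = uvm_coredump_walkmap(p, count_writable_cb, &n); */
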
  603 
  604 /*
  605  * the various kernel maps, owned by MD code
  606  */
  607 extern struct vm_map *kernel_map;
  608 extern struct vm_map *phys_map;
  609 
  610 /*
  611  *      uvm_voaddr:
  612  *
  613  *      This structure encapsulates UVM's unique virtual object address
  614  *      for an individual byte inside a pageable page. Pageable pages can
  615  *      be owned by either a uvm_object or a vm_anon.
  616  *
  617  *      In each case, the byte offset into the owning object
  618  *      (uvm_object or vm_anon) is included in the ID, so that
  619  *      two different offsets into the same page have distinct
  620  *      IDs.
  621  *
  622  *      Note that the page does not necessarily have to be resident
  623  *      in order to know the virtual object address.  However, it
  624  *      is required that any pending copy-on-write is resolved.
  625  *
  626  *      When someone wants a virtual object address, an extra reference
  627  *      is taken on the owner while the caller uses the ID.  This
  628  *      ensures that the identity is stable for the duration of its
  629  *      use.
  630  */
  631 struct uvm_voaddr {
  632         uintptr_t object;
  633         voff_t offset;
  634 };
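
The acquire/release/compare functions declared below operate on these handles; the typical pattern is to pin a stable identity for one byte of pageable memory, use it as a lookup or ordering key, and then drop the reference. A hedged sketch (illustrative, not part of the header); uaddr and the error handling are placeholders:

    /* Illustrative only: key a wait object on the identity of a user byte. */
    struct uvm_voaddr va;

    if (!uvm_voaddr_acquire(&p->p_vmspace->vm_map, uaddr, &va))
            return EFAULT;  /* address not resolvable to a stable identity */
    /* ... uvm_voaddr_compare(&va, &other) gives a total order for lookups ... */
    uvm_voaddr_release(&va);
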
  635 
  636 /*
  637  * macros
  638  */
  639 
  640 #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
  641 
  642 
  643 /* vm_machdep.c */
  644 int             vmapbuf(struct buf *, vsize_t);
  645 void            vunmapbuf(struct buf *, vsize_t);
  646 void            ktext_write(void *, const void *, size_t);
  647 
  648 /* uvm_aobj.c */
  649 struct uvm_object       *uao_create(voff_t, int);
  650 void                    uao_set_pgfl(struct uvm_object *, int);
  651 void                    uao_detach(struct uvm_object *);
  652 void                    uao_reference(struct uvm_object *);
  653 
  654 /* uvm_bio.c */
  655 void                    ubc_init(void);
  656 void                    ubchist_init(void);
  657 int                     ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
  658                             int, int);
  659 void                    ubc_zerorange(struct uvm_object *, off_t, size_t, int);
  660 void                    ubc_purge(struct uvm_object *);
  661 
  662 /* uvm_fault.c */
  663 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0)
  664 int             uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int);
  665                         /* handle a page fault */
  666 
  667 /* uvm_glue.c */
  668 #if defined(KGDB)
  669 void                    uvm_chgkprot(void *, size_t, int);
  670 #endif
  671 void                    uvm_proc_fork(struct proc *, struct proc *, bool);
  672 void                    uvm_lwp_fork(struct lwp *, struct lwp *,
  673                             void *, size_t, void (*)(void *), void *);
  674 int                     uvm_coredump_walkmap(struct proc *,
  675                             int (*)(struct uvm_coredump_state *), void *);
  676 int                     uvm_coredump_count_segs(struct proc *);
  677 void                    uvm_proc_exit(struct proc *);
  678 void                    uvm_lwp_exit(struct lwp *);
  679 void                    uvm_idle(void);
  680 void                    uvm_init_limits(struct proc *);
  681 bool                    uvm_kernacc(void *, size_t, vm_prot_t);
  682 __dead void             uvm_scheduler(void);
  683 vaddr_t                 uvm_uarea_alloc(void);
  684 void                    uvm_uarea_free(vaddr_t);
  685 vaddr_t                 uvm_uarea_system_alloc(struct cpu_info *);
  686 void                    uvm_uarea_system_free(vaddr_t);
  687 vaddr_t                 uvm_lwp_getuarea(lwp_t *);
  688 void                    uvm_lwp_setuarea(lwp_t *, vaddr_t);
  689 int                     uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t);
  690 void                    uvm_vsunlock(struct vmspace *, void *, size_t);
  691 void                    uvm_cpu_attach(struct cpu_info *);
  692 
  693 
  694 /* uvm_init.c */
  695 void                    uvm_md_init(void);
  696 void                    uvm_init(void);
  697 
  698 /* uvm_io.c */
  699 int                     uvm_io(struct vm_map *, struct uio *, int);
  700 
  701 /* uvm_km.c */
  702 vaddr_t                 uvm_km_alloc(struct vm_map *, vsize_t, vsize_t,
  703                             uvm_flag_t);
  704 int                     uvm_km_protect(struct vm_map *, vaddr_t, vsize_t,
  705                             vm_prot_t);
  706 void                    uvm_km_free(struct vm_map *, vaddr_t, vsize_t,
  707                             uvm_flag_t);
  708 
  709 struct vm_map           *uvm_km_suballoc(struct vm_map *, vaddr_t *,
  710                             vaddr_t *, vsize_t, int, bool,
  711                             struct vm_map *);
  712 int                     uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t,
  713                             vmem_addr_t *);
  714 void                    uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t);
  715 bool                    uvm_km_va_starved_p(void);
  716 
  717 /* uvm_map.c */
  718 int                     uvm_map(struct vm_map *, vaddr_t *, vsize_t,
  719                             struct uvm_object *, voff_t, vsize_t,
  720                             uvm_flag_t);
  721 int                     uvm_map_pageable(struct vm_map *, vaddr_t,
  722                             vaddr_t, bool, int);
  723 int                     uvm_map_pageable_all(struct vm_map *, int, vsize_t);
  724 bool                    uvm_map_checkprot(struct vm_map *, vaddr_t,
  725                             vaddr_t, vm_prot_t);
  726 int                     uvm_map_protect(struct vm_map *, vaddr_t,
  727                             vaddr_t, vm_prot_t, bool);
  728 int                     uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t,
  729                             vm_prot_t);
  730 struct vmspace          *uvmspace_alloc(vaddr_t, vaddr_t, bool);
  731 void                    uvmspace_init(struct vmspace *, struct pmap *,
  732                             vaddr_t, vaddr_t, bool);
  733 void                    uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool);
  734 void                    uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool);
  735 struct vmspace          *uvmspace_fork(struct vmspace *);
  736 void                    uvmspace_addref(struct vmspace *);
  737 void                    uvmspace_free(struct vmspace *);
  738 void                    uvmspace_share(struct proc *, struct proc *);
  739 void                    uvmspace_unshare(struct lwp *);
  740 
  741 bool                    uvm_voaddr_acquire(struct vm_map *, vaddr_t,
  742                             struct uvm_voaddr *);
  743 void                    uvm_voaddr_release(struct uvm_voaddr *);
  744 int                     uvm_voaddr_compare(const struct uvm_voaddr *,
  745                             const struct uvm_voaddr *);
  746 
  747 void                    uvm_whatis(uintptr_t, void (*)(const char *, ...));
  748 
  749 /* uvm_meter.c */
  750 int                     uvm_sysctl(int *, u_int, void *, size_t *,
  751                             void *, size_t, struct proc *);
  752 int                     uvm_pctparam_check(struct uvm_pctparam *, int);
  753 void                    uvm_pctparam_set(struct uvm_pctparam *, int);
  754 int                     uvm_pctparam_get(struct uvm_pctparam *);
  755 void                    uvm_pctparam_init(struct uvm_pctparam *, int,
  756                             int (*)(struct uvm_pctparam *, int));
  757 int                     uvm_pctparam_createsysctlnode(struct uvm_pctparam *,
  758                             const char *, const char *);
  759 void                    uvm_update_uvmexp(void);
  760 
  761 /* uvm_mmap.c */
  762 int                     uvm_mmap_dev(struct proc *, void **, size_t, dev_t,
  763                             off_t);
  764 int                     uvm_mmap_anon(struct proc *, void **, size_t);
  765 vaddr_t                 uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t,
  766                             int);
  767 
  768 /* uvm_mremap.c */
  769 int                     uvm_mremap(struct vm_map *, vaddr_t, vsize_t,
  770                             struct vm_map *, vaddr_t *, vsize_t,
  771                             struct proc *, int);
  772 
  773 /* uvm_object.c */
  774 void                    uvm_obj_init(struct uvm_object *,
  775                             const struct uvm_pagerops *, bool, u_int);
  776 void                    uvm_obj_setlock(struct uvm_object *, krwlock_t *);
  777 void                    uvm_obj_destroy(struct uvm_object *, bool);
  778 int                     uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
  779                             struct pglist *);
  780 void                    uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
  781 bool                    uvm_obj_clean_p(struct uvm_object *);
  782 bool                    uvm_obj_nowriteback_p(struct uvm_object *);
  783 bool                    uvm_obj_page_dirty_p(struct vm_page *);
  784 void                    uvm_obj_page_set_dirty(struct vm_page *);
  785 void                    uvm_obj_page_clear_dirty(struct vm_page *);
  786 bool                    uvm_obj_page_writeback_p(struct vm_page *);
  787 void                    uvm_obj_page_set_writeback(struct vm_page *);
  788 void                    uvm_obj_page_clear_writeback(struct vm_page *);
  789 
  790 /* uvm_page.c */
  791 int                     uvm_availmem(bool);
  792 void                    uvm_page_numa_load(paddr_t, paddr_t, u_int);
  793 struct vm_page          *uvm_pagealloc_strat(struct uvm_object *,
  794                             voff_t, struct vm_anon *, int, int, int);
  795 #define uvm_pagealloc(obj, off, anon, flags) \
  796             uvm_pagealloc_strat((obj), (off), (anon), (flags), \
  797                                 UVM_PGA_STRAT_NORMAL, 0)
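
uvm_pagealloc() is the common-case wrapper: it always asks for the NORMAL strategy and free list 0 of uvm_pagealloc_strat(). A minimal sketch (illustrative, not part of the header) of allocating one zero-filled page for an object, assuming the caller already holds the object's lock as uvm_pagealloc(9) requires; uobj and off are placeholders:

    /* Illustrative only: one zeroed page at offset off in uobj. */
    struct vm_page *pg;

    pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
    if (pg == NULL) {
            /* no memory: unlock, wait for the pagedaemon, and retry, or fail */
            return ENOMEM;
    }
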
  798 void                    uvm_pagereplace(struct vm_page *,
  799                             struct vm_page *);
  800 int                     uvm_pagerealloc(struct vm_page *,
  801                             struct uvm_object *, voff_t);
  802 void                    uvm_setpagesize(void);
  803 
  804 /* uvm_pager.c */
  805 void                    uvm_aio_aiodone(struct buf *);
  806 void                    uvm_aio_aiodone_pages(struct vm_page **, int, bool,
  807                             int);
  808 
  809 /* uvm_pdaemon.c */
  810 void                    uvm_pageout(void *);
  811 struct work;
  812 void                    uvm_aiodone_worker(struct work *, void *);
  813 void                    uvm_pageout_start(int);
  814 void                    uvm_pageout_done(int);
  815 void                    uvm_estimatepageable(int *, int *);
  816 
  817 /* uvm_pglist.c */
  818 int                     uvm_pglistalloc(psize_t, paddr_t, paddr_t,
  819                             paddr_t, paddr_t, struct pglist *, int, int);
  820 void                    uvm_pglistfree(struct pglist *);
  821 
  822 /* uvm_swap.c */
  823 void                    uvm_swap_init(void);
  824 
  825 /* uvm_unix.c */
  826 int                     uvm_grow(struct proc *, vaddr_t);
  827 
  828 /* uvm_user.c */
  829 void                    uvm_deallocate(struct vm_map *, vaddr_t, vsize_t);
  830 
  831 /* uvm_vnode.c */
  832 struct uvm_page_array;
  833 void                    uvm_vnp_setsize(struct vnode *, voff_t);
  834 void                    uvm_vnp_setwritesize(struct vnode *, voff_t);
  835 int                     uvn_findpages(struct uvm_object *, voff_t,
  836                             unsigned int *, struct vm_page **,
  837                             struct uvm_page_array *, unsigned int);
  838 bool                    uvn_text_p(struct uvm_object *);
  839 bool                    uvn_needs_writefault_p(struct uvm_object *);
  840 
  841 /* kern_malloc.c */
  842 void                    kmeminit_nkmempages(void);
  843 extern int              nkmempages;
  844 
  845 #endif /* _KERNEL */
  846 
  847 #endif /* _UVM_UVM_EXTERN_H_ */
