FreeBSD/Linux Kernel Cross Reference
sys/sys/pool.h


/*      $NetBSD: pool.h,v 1.64 2008/07/04 16:38:59 ad Exp $     */

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_POOL_H_
#define _SYS_POOL_H_

#ifdef _KERNEL
#define __POOL_EXPOSE
#endif

#if defined(_KERNEL_OPT)
#include "opt_pool.h"
#endif

#ifdef __POOL_EXPOSE
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/tree.h>
#include <sys/callback.h>
#endif

#define POOL_PADDR_INVALID      ((paddr_t) -1)

#ifdef __POOL_EXPOSE
struct pool;

struct pool_allocator {
        void            *(*pa_alloc)(struct pool *, int);
        void            (*pa_free)(struct pool *, void *);
        unsigned int    pa_pagesz;

        /* The following fields are for internal use only. */
        kmutex_t        pa_lock;
        TAILQ_HEAD(, pool) pa_list;     /* list of pools using this allocator */
        int             pa_flags;
#define PA_INITIALIZED  0x01
        int             pa_pagemask;
        int             pa_pageshift;
        struct vm_map *pa_backingmap;
#if defined(_KERNEL)
        struct vm_map **pa_backingmapptr;
        SLIST_ENTRY(pool_allocator) pa_q;
#endif /* defined(_KERNEL) */
};
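
/*
 * A minimal sketch of a custom back-end allocator: only pa_alloc,
 * pa_free and pa_pagesz need be filled in; the remaining fields are
 * managed by the pool code.  pa_alloc receives the PR_WAITOK/PR_NOWAIT
 * flag and returns pages of pa_pagesz bytes.  The names below are
 * hypothetical, and the uvm_km_alloc()/uvm_km_free() calls are assumed
 * to match the NetBSD UVM interface of this era.
 */
#if 0
static void *
example_page_alloc(struct pool *pp, int flags)
{

        /* Wired kernel memory; do not sleep unless PR_WAITOK was given. */
        return ((void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
            0, UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT)));
}

static void
example_page_free(struct pool *pp, void *v)
{

        uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz,
            UVM_KMF_WIRED);
}

static struct pool_allocator example_allocator = {
        .pa_alloc  = example_page_alloc,
        .pa_free   = example_page_free,
        .pa_pagesz = 0,         /* 0 selects the default page size */
};
#endif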

LIST_HEAD(pool_pagelist,pool_item_header);

struct pool {
        TAILQ_ENTRY(pool)
                        pr_poollist;
        struct pool_pagelist
                        pr_emptypages;  /* Empty pages */
        struct pool_pagelist
                        pr_fullpages;   /* Full pages */
        struct pool_pagelist
                        pr_partpages;   /* Partially-allocated pages */
        struct pool_item_header *pr_curpage;
        struct pool     *pr_phpool;     /* Pool item header pool */
        struct pool_cache *pr_cache;    /* Cache for this pool */
        unsigned int    pr_size;        /* Size of item */
        unsigned int    pr_align;       /* Requested alignment, must be 2^n */
        unsigned int    pr_itemoffset;  /* Align this offset in item */
        unsigned int    pr_minitems;    /* minimum # of items to keep */
        unsigned int    pr_minpages;    /* same in page units */
        unsigned int    pr_maxpages;    /* maximum # of pages to keep */
        unsigned int    pr_npages;      /* # of pages allocated */
        unsigned int    pr_itemsperpage;/* # items that fit in a page */
        unsigned int    pr_slack;       /* unused space in a page */
        unsigned int    pr_nitems;      /* number of available items in pool */
        unsigned int    pr_nout;        /* # items currently allocated */
        unsigned int    pr_hardlimit;   /* hard limit to number of allocated
                                           items */
        unsigned int    pr_refcnt;      /* ref count for pagedaemon, etc */
        struct pool_allocator *pr_alloc;/* back-end allocator */
        TAILQ_ENTRY(pool) pr_alloc_list;/* link on allocator's pool list */

        /* Drain hook. */
        void            (*pr_drain_hook)(void *, int);
        void            *pr_drain_hook_arg;

        const char      *pr_wchan;      /* tsleep(9) identifier */
        unsigned int    pr_flags;       /* r/w flags */
        unsigned int    pr_roflags;     /* r/o flags */
#define PR_WAITOK       0x01    /* Note: matches KM_SLEEP */
#define PR_NOWAIT       0x02    /* Note: matches KM_NOSLEEP */
#define PR_WANTED       0x04
#define PR_PHINPAGE     0x40
#define PR_LOGGING      0x80
#define PR_LIMITFAIL    0x100   /* even if waiting, fail if we hit limit */
#define PR_RECURSIVE    0x200   /* pool contains pools, for vmstat(8) */
#define PR_NOTOUCH      0x400   /* don't use free items to keep internal state */
#define PR_NOALIGN      0x800   /* don't assume backend alignment */
#define PR_LARGECACHE   0x1000  /* use large cache groups */

        /*
         * `pr_lock' protects the pool's data structures when removing
         * items from or returning items to the pool, or when reading
         * or updating read/write fields in the pool descriptor.
         *
         * We assume back-end page allocators provide their own locking
         * scheme.  They will be called with the pool descriptor _unlocked_,
         * since the page allocators may block.
         */
        kmutex_t        pr_lock;
        kcondvar_t      pr_cv;
        int             pr_ipl;

        SPLAY_HEAD(phtree, pool_item_header) pr_phtree;

        int             pr_maxcolor;    /* Cache colouring */
        int             pr_curcolor;
        int             pr_phoffset;    /* Offset in page of page header */

        /*
         * Warning message to be issued, and a per-time-delta rate cap,
         * if the hard limit is reached.
         */
        const char      *pr_hardlimit_warning;
        struct timeval  pr_hardlimit_ratecap;
        struct timeval  pr_hardlimit_warning_last;

        /*
         * Instrumentation
         */
        unsigned long   pr_nget;        /* # of successful requests */
        unsigned long   pr_nfail;       /* # of unsuccessful requests */
        unsigned long   pr_nput;        /* # of releases */
        unsigned long   pr_npagealloc;  /* # of pages allocated */
        unsigned long   pr_npagefree;   /* # of pages released */
        unsigned int    pr_hiwat;       /* max # of pages in pool */
        unsigned long   pr_nidle;       /* # of idle pages */

        /*
         * Diagnostic aids.
         */
        struct pool_log *pr_log;
        int             pr_curlogentry;
        int             pr_logsize;

        const char      *pr_entered_file; /* reentrancy check */
        long            pr_entered_line;

        struct callback_entry pr_reclaimerentry;
        void            *pr_freecheck;
        void            *pr_qcache;
};

/*
 * Cache group sizes, assuming 4-byte paddr_t on !_LP64.
 * All groups will be aligned to CACHE_LINE_SIZE.
 */
#ifdef _LP64
#define PCG_NOBJECTS_NORMAL     15      /* 256 byte group */
#define PCG_NOBJECTS_LARGE      63      /* 1024 byte group */
#else
#define PCG_NOBJECTS_NORMAL     14      /* 124 byte group */
#define PCG_NOBJECTS_LARGE      62      /* 508 byte group */
#endif
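
/*
 * (For illustration: a pcgpair_t, defined below, is 8 bytes with
 * 4-byte pointers and paddr_t, so a normal group is 4 + 4 + 4 header
 * bytes + 14 * 8 = 124 bytes; on _LP64 a pcgpair_t is 16 bytes and a
 * normal group is 8 + 4 + 4 + 15 * 16 = 256 bytes.  The large group
 * sizes follow the same arithmetic.)
 */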

typedef struct pcgpair {
        void    *pcgo_va;               /* object virtual address */
        paddr_t pcgo_pa;                /* object physical address */
} pcgpair_t;

/* The pool cache group. */
typedef struct pool_cache_group {
        struct pool_cache_group *pcg_next;      /* link to next group */
        u_int                   pcg_avail;      /* # available objects */
        u_int                   pcg_size;       /* max number objects */
        pcgpair_t               pcg_objects[1]; /* the objects */
} pcg_t;

typedef struct pool_cache_cpu {
        uint64_t                cc_misses;
        uint64_t                cc_hits;
        struct pool_cache_group *cc_current;
        struct pool_cache_group *cc_previous;
        struct pool_cache       *cc_cache;
        int                     cc_ipl;
        int                     cc_cpuindex;
#ifdef _KERNEL
        ipl_cookie_t            cc_iplcookie;
#endif
} pool_cache_cpu_t;

struct pool_cache {
        /* Pool layer. */
        struct pool     pc_pool;

        /* Cache layer. */
        kmutex_t        pc_lock;        /* locks cache layer */
        TAILQ_ENTRY(pool_cache)
                        pc_cachelist;   /* entry on global cache list */
        pcg_t           *pc_emptygroups;/* list of empty cache groups */
        pcg_t           *pc_fullgroups; /* list of full cache groups */
        pcg_t           *pc_partgroups; /* groups for reclamation */
        struct pool     *pc_pcgpool;    /* Pool of cache groups */
        int             pc_pcgsize;     /* Use large cache groups? */
        int             pc_ncpu;        /* number of cpus set up */
        int             (*pc_ctor)(void *, void *, int);
        void            (*pc_dtor)(void *, void *);
        void            *pc_arg;        /* for ctor/dtor */
        uint64_t        pc_hits;        /* cache layer hits */
        uint64_t        pc_misses;      /* cache layer misses */
        uint64_t        pc_contended;   /* contention events on cache */
        unsigned int    pc_nempty;      /* empty groups in cache */
        unsigned int    pc_nfull;       /* full groups in cache */
        unsigned int    pc_npart;       /* partial groups in cache */
        unsigned int    pc_refcnt;      /* ref count for pagedaemon, etc */
        void            *pc_freecheck;

        /* CPU layer. */
        pool_cache_cpu_t pc_cpu0 __aligned(CACHE_LINE_SIZE);
        void            *pc_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
};

#endif /* __POOL_EXPOSE */

typedef struct pool_cache *pool_cache_t;

#ifdef _KERNEL
/*
 * pool_allocator_kmem is the default that all pools get unless
 * otherwise specified.  pool_allocator_nointr is provided for
 * pools that know they will never be accessed in interrupt
 * context.
 */
extern struct pool_allocator pool_allocator_kmem;
extern struct pool_allocator pool_allocator_nointr;
extern struct pool_allocator pool_allocator_meta;
#ifdef POOL_SUBPAGE
/* The above are subpage allocators in this case. */
extern struct pool_allocator pool_allocator_kmem_fullpage;
extern struct pool_allocator pool_allocator_nointr_fullpage;
#endif

struct link_pool_init { /* same as args to pool_init() */
        struct pool *pp;
        size_t size;
        u_int align;
        u_int align_offset;
        int flags;
        const char *wchan;
        struct pool_allocator *palloc;
        int ipl;
};
#define POOL_INIT(pp, size, align, align_offset, flags, wchan, palloc, ipl)\
struct pool pp;                                                         \
static const struct link_pool_init _link_ ## pp[1] = {                  \
        { &pp, size, align, align_offset, flags, wchan, palloc, ipl }   \
};                                                                      \
__link_set_add_rodata(pools, _link_ ## pp)
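
/*
 * A minimal usage sketch (the pool and struct names are hypothetical):
 * the macro defines the pool object itself and records the pool_init()
 * arguments in the "pools" link set, which pool_subsystem_init() walks
 * at boot to perform the deferred initializations.
 */
#if 0
POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
    &pool_allocator_nointr, IPL_NONE);
#endif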

void            pool_subsystem_init(void);

void            pool_init(struct pool *, size_t, u_int, u_int,
                    int, const char *, struct pool_allocator *, int);
void            pool_destroy(struct pool *);

void            pool_set_drain_hook(struct pool *,
                    void (*)(void *, int), void *);

void            *pool_get(struct pool *, int);
void            pool_put(struct pool *, void *);
int             pool_reclaim(struct pool *);
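
/*
 * A minimal usage sketch with hypothetical names.  pool_get() with
 * PR_NOWAIT returns NULL when no item can be allocated; with PR_WAITOK
 * it may instead sleep on the pr_wchan identifier until an item is
 * freed.
 */
#if 0
static struct pool foo_pool;

void
foo_subsystem_init(void)
{

        pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
            &pool_allocator_nointr, IPL_NONE);
}

struct foo *
foo_alloc(void)
{

        return (pool_get(&foo_pool, PR_NOWAIT));    /* NULL on failure */
}

void
foo_free(struct foo *f)
{

        pool_put(&foo_pool, f);
}
#endif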

#ifdef POOL_DIAGNOSTIC
/*
 * These versions do reentrancy checking.
 */
void            *_pool_get(struct pool *, int, const char *, long);
void            _pool_put(struct pool *, void *, const char *, long);
int             _pool_reclaim(struct pool *, const char *, long);
#define         pool_get(h, f)  _pool_get((h), (f), __FILE__, __LINE__)
#define         pool_put(h, v)  _pool_put((h), (v), __FILE__, __LINE__)
#define         pool_reclaim(h) _pool_reclaim((h), __FILE__, __LINE__)
#endif /* POOL_DIAGNOSTIC */

int             pool_prime(struct pool *, int);
void            pool_setlowat(struct pool *, int);
void            pool_sethiwat(struct pool *, int);
void            pool_sethardlimit(struct pool *, int, const char *, int);
void            pool_drain_start(struct pool **, uint64_t *);
void            pool_drain_end(struct pool *, uint64_t);
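
/*
 * A sketch of tuning the hypothetical pool above: keep at least 16
 * items cached, and cap the pool at 1024 outstanding items, printing
 * the warning at most once every 60 seconds when the limit is hit.
 */
#if 0
void
foo_tune(void)
{

        pool_setlowat(&foo_pool, 16);
        pool_sethardlimit(&foo_pool, 1024,
            "WARNING: foo_pool limit reached", 60);
}
#endif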

/*
 * Debugging and diagnostic aids.
 */
void            pool_print(struct pool *, const char *);
void            pool_printit(struct pool *, const char *,
                    void (*)(const char *, ...));
void            pool_printall(const char *, void (*)(const char *, ...));
int             pool_chk(struct pool *, const char *);

/*
 * Pool cache routines.
 */
pool_cache_t    pool_cache_init(size_t, u_int, u_int, u_int, const char *,
                    struct pool_allocator *, int, int (*)(void *, void *, int),
                    void (*)(void *, void *), void *);
void            pool_cache_bootstrap(pool_cache_t, size_t, u_int, u_int, u_int,
                    const char *, struct pool_allocator *, int,
                    int (*)(void *, void *, int), void (*)(void *, void *),
                    void *);
void            pool_cache_destroy(pool_cache_t);
void            *pool_cache_get_paddr(pool_cache_t, int, paddr_t *);
void            pool_cache_put_paddr(pool_cache_t, void *, paddr_t);
void            pool_cache_destruct_object(pool_cache_t, void *);
void            pool_cache_invalidate(pool_cache_t);
bool            pool_cache_reclaim(pool_cache_t);
void            pool_cache_set_drain_hook(pool_cache_t,
                    void (*)(void *, int), void *);
void            pool_cache_setlowat(pool_cache_t, int);
void            pool_cache_sethiwat(pool_cache_t, int);
void            pool_cache_sethardlimit(pool_cache_t, int, const char *, int);
void            pool_cache_cpu_init(struct cpu_info *);

#define         pool_cache_get(pc, f) pool_cache_get_paddr((pc), (f), NULL)
#define         pool_cache_put(pc, o) pool_cache_put_paddr((pc), (o), \
                                          POOL_PADDR_INVALID)
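
/*
 * A minimal pool_cache sketch with hypothetical names (struct foo and
 * its f_lock field are illustrative).  The ctor runs when an object is
 * first constructed and the dtor only when it finally leaves the
 * cache, so get/put pairs that hit the per-CPU groups skip both, as
 * well as the pool lock.
 */
#if 0
static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
        struct foo *f = obj;

        mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
        return 0;               /* nonzero fails the allocation */
}

static void
foo_dtor(void *arg, void *obj)
{
        struct foo *f = obj;

        mutex_destroy(&f->f_lock);
}

void
foo_cache_attach(void)
{
        struct foo *f;

        foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0, "foocache",
            NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);

        f = pool_cache_get(foo_cache, PR_WAITOK);   /* constructed object */
        pool_cache_put(foo_cache, f);               /* stays constructed */
}
#endif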

void            pool_whatis(uintptr_t, void (*)(const char *, ...));
#endif /* _KERNEL */

#endif /* _SYS_POOL_H_ */
