FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_zone.c


/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *      notice immediately at the beginning of the file, without modification,
 *      this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *      John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID 0
#define ZONE_ERROR_NOTFREE 1
#define ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING   32

#define ZENTRY_FREE     0x12342378

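/*
 * Maximum number of free items moved from a zone's global depot to a
 * per-cpu cache in a single zalloc() refill (tunable at runtime via the
 * vm.zone_burst sysctl declared at the bottom of this file).
 */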
int zone_burst = 32;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
        globaldata_t gd = mycpu;
        void *item;
        int n;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
retry:
        /*
         * Avoid spinlock contention by allocating from a per-cpu queue
         */
        if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                crit_enter_gd(gd);
                if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                        item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
                        KASSERT(item != NULL,
                                ("zitems_pcpu unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        ((void **)item)[1] = NULL;
#endif
                        z->zitems_pcpu[gd->gd_cpuid] = ((void **) item)[0];
                        --z->zfreecnt_pcpu[gd->gd_cpuid];
                        z->znalloc++;
                        crit_exit_gd(gd);
                        return item;
                }
                crit_exit_gd(gd);
        }

        /*
         * Per-zone spinlock for the remainder.  Always load at least one
         * item.
         */
        spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                n = zone_burst;
                do {
                        item = z->zitems;
#ifdef INVARIANTS
                        KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
#endif
                        z->zitems = ((void **)item)[0];
                        z->zfreecnt--;
                        ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
                        z->zitems_pcpu[gd->gd_cpuid] = item;
                        ++z->zfreecnt_pcpu[gd->gd_cpuid];
                } while (--n > 0 && z->zfreecnt > z->zfreemin);
                spin_unlock(&z->zlock);
                goto retry;
        } else {
                spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return item;
}
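
/*
 * Illustrative sketch (not part of the original file): a caller using a
 * zone created without ZONE_PANICFAIL must be prepared for a NULL return.
 * "foo_zone" and "struct foo" are assumed names.
 *
 *	struct foo *fp;
 *
 *	fp = zalloc(foo_zone);
 *	if (fp == NULL)
 *		return (ENOMEM);	-- can fail without ZONE_PANICFAIL
 */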

/*
 * Free an item back to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
        globaldata_t gd = mycpu;
        int zmax;

        /*
         * Avoid spinlock contention by freeing into a per-cpu queue
         */
        if ((zmax = z->zmax) != 0)
                zmax = zmax / ncpus / 16;
        if (zmax < 64)
                zmax = 64;

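        /*
         * Each per-cpu cache is bounded to 1/16 of this cpu's share of
         * zmax, but never less than 64 items.  Once the cache is full,
         * items are freed to the global depot below instead.
         */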
        if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
                crit_enter_gd(gd);
                ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
                if (((void **)item)[1] == (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_ALREADYFREE);
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems_pcpu[gd->gd_cpuid] = item;
                ++z->zfreecnt_pcpu[gd->gd_cpuid];
                crit_exit_gd(gd);
                return;
        }

        /*
         * Per-zone spinlock for the remainder.
         */
        spin_lock(&z->zlock);
        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
        z->zitems = item;
        z->zfreecnt++;
        spin_unlock(&z->zlock);
}

/*
 * This file implements a very simple zone allocator.  It is used in lieu
 * of the malloc allocator where that is needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and no
 * improvement was observed (in fact, performance degraded).
 *
 * Note also that the zones are type-stable.  The only restriction is
 * that the first two longwords of a data structure may be modified
 * between allocations.  Any data that must be stable between allocations
 * must reside after the first two longwords.
 *
 * zinitna(), zinit() and zbootinit() are the initialization routines.
 * zalloc() and zfree() are the allocation/free routines.
 */
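
/*
 * Illustrative layout sketch (hypothetical "struct foo", not part of the
 * original file): while an item sits on a free list, the allocator uses
 * its first longword as the free-list link and, under INVARIANTS, its
 * second longword for the ZENTRY_FREE magic.  Only fields placed after
 * those two longwords survive a zfree()/zalloc() cycle.
 *
 *	struct foo {
 *		void	*f_scratch[2];	-- clobbered while on a free list
 *		int	 f_stable;	-- type-stable across allocations
 *	};
 */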

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a priori allocate the kernel virtual space and allocate
 * pages only when needed.
 *
 * Arguments:
 * z            pointer to zone structure.
 * obj          pointer to VM object (opt).
 * name         name of zone.
 * size         size of zone entries.
 * nentries     number of zone entries allocated (ZONE_INTERRUPT only).
 * flags        ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc       number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The amount of allocatable memory is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
        int nentries, int flags, int zalloc)
{
        size_t totsize;

        /*
         * Only zones created with zinit() are destroyable.
         */
        if (z->zflags & ZONE_DESTROYABLE)
                panic("zinitna: can't create destroyable zone");

        /*
         * NOTE: We can only adjust zsize if we previously did not
         *       use zbootinit().
         */
        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
                spin_init(&z->zlock);
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->znalloc = 0;
                z->zitems = NULL;

                lwkt_gettoken(&vm_token);
                LIST_INSERT_HEAD(&zlist, z, zlink);
                lwkt_reltoken(&vm_token);

                bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
                bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
        }

        z->zkmvec = NULL;
        z->zkmcur = z->zkmmax = 0;
        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);

                z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return 0;
                }

                z->zpagemax = totsize / PAGE_SIZE;
                if (obj == NULL) {
                        z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
                } else {
                        z->zobj = obj;
                        _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
                        vm_object_drop(obj);
                }
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
                                VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
                z->zmax += nentries;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;
        if (zalloc)
                z->zalloc = zalloc;
        else
                z->zalloc = 1;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                zfree(z, buf);
        }

        return 1;
}
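
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): setting up an interrupt-safe zone over a statically allocated
 * zone structure.  KVA for all nentries items is reserved up front and
 * backing pages are filled in on demand.
 *
 *	static struct vm_zone pcb_zone_store;
 *
 *	if (zinitna(&pcb_zone_store, NULL, "PCB", sizeof(struct pcb),
 *		    512, ZONE_INTERRUPT, 1) == 0)
 *		panic("cannot create PCB zone");
 */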

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by malloc.  This is the standard zone initialization
 * call; zbootinit() and zinitna() should only be needed for certain
 * tricky startup conditions in the VM system.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
        vm_zone_t z;

        z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return NULL;

        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries,
                    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
                kfree(z, M_ZONE);
                return NULL;
        }

        if (flags & ZONE_DESTROYABLE)
                z->zflags |= ZONE_DESTROYABLE;

        return z;
}
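
/*
 * Illustrative lifecycle sketch (hypothetical "foo" subsystem, not part
 * of the original file): a dynamically created, destroyable zone that
 * grows four pages at a time.
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, ZONE_DESTROYABLE, 4);
 *	...
 *	fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 *	...
 *	zdestroy(foo_zone);
 */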

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
        int i;

        bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
        bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zobj = NULL;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;
        z->znalloc = 0;
        spin_init(&z->zlock);

        bzero(item, (size_t)nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        lwkt_gettoken(&vm_token);
        LIST_INSERT_HEAD(&zlist, z, zlink);
        lwkt_reltoken(&vm_token);
}
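
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): bootstrapping a zone from a static array of items before the
 * VM system is up; zinitna() can later convert it into a normal,
 * growable zone.
 *
 *	static struct vm_zone map_zone_store;
 *	static struct vm_map map_init[MAX_KMAP];
 *
 *	zbootinit(&map_zone_store, "MAP", sizeof(struct vm_map),
 *		  map_init, MAX_KMAP);
 */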

/*
 * Release all resources owned by a zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
        vm_page_t m;
        int i;

        if (z == NULL)
                panic("zdestroy: null zone");
        if ((z->zflags & ZONE_DESTROYABLE) == 0)
                panic("zdestroy: undestroyable zone");

        lwkt_gettoken(&vm_token);
        LIST_REMOVE(z, zlink);
        lwkt_reltoken(&vm_token);

        /*
         * Release virtual mappings, physical memory and update sysctl stats.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Pages mapped via pmap_kenter() must be removed from the
                 * kernel_pmap before calling kmem_free() to avoid issues
                 * with kernel_pmap.pm_stats.resident_count.
                 */
                pmap_qremove(z->zkva, z->zpagemax);
                vm_object_hold(z->zobj);
                for (i = 0; i < z->zpagecount; ++i) {
                        m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
                        vm_page_unwire(m, 0);
                        vm_page_free(m);
                }

                /*
                 * Free the mapping.
                 */
                kmem_free(&kernel_map, z->zkva,
                          (size_t)z->zpagemax * PAGE_SIZE);
                atomic_subtract_long(&zone_kmem_kvaspace,
                                     (size_t)z->zpagemax * PAGE_SIZE);

                /*
                 * Free the backing object and physical pages.
                 */
                vm_object_deallocate(z->zobj);
                vm_object_drop(z->zobj);
                atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
        } else {
                for (i = 0; i < z->zkmcur; i++) {
                        kmem_free(&kernel_map, z->zkmvec[i],
                                  (size_t)z->zalloc * PAGE_SIZE);
                        atomic_subtract_int(&zone_kern_pages, z->zalloc);
                }
                if (z->zkmvec != NULL)
                        kfree(z->zkmvec, M_ZONE);
        }

        spin_uninit(&z->zlock);
        kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *      Returns an item from a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *      Frees an item back to a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
        int i;
        vm_page_t m;
        int nitems;
        int npages;
        int savezpc;
        size_t nbytes;
        size_t noffset;
        void *item;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 *
                 * First reserve the required space.
                 */
                vm_object_hold(z->zobj);
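                /*
                 * Round the starting offset down to an item boundary.
                 * This is the first unused item slot, which may begin
                 * inside the last already-populated page, so items are
                 * packed contiguously from the base of the zone's KVA.
                 */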
                noffset = (size_t)z->zpagecount * PAGE_SIZE;
                noffset -= noffset % z->zsize;
                savezpc = z->zpagecount;
                if (z->zpagecount + z->zalloc > z->zpagemax)
                        z->zpagecount = z->zpagemax;
                else
                        z->zpagecount += z->zalloc;
                item = (char *)z->zkva + noffset;
                npages = z->zpagecount - savezpc;
                nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
                         z->zsize;
                atomic_add_int(&zone_kmem_pages, npages);

                /*
                 * Now allocate the pages.  Note that we can block in the
                 * loop, so we've already done all the necessary calculations
                 * and reservations above.
                 */
                for (i = 0; i < npages; ++i) {
                        vm_offset_t zkva;

                        m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
                        KKASSERT(m != NULL);
                        /* note: z might be modified due to blocking */

                        KKASSERT(m->queue == PQ_NONE);
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_wire(m);
                        vm_page_wakeup(m);

                        zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
                        bzero((void *)zkva, PAGE_SIZE);
                }
                vm_object_drop(z->zobj);
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);

                        if (z->zflags & ZONE_DESTROYABLE) {
                                if (z->zkmcur == z->zkmmax) {
                                        z->zkmmax =
                                                z->zkmmax==0 ? 1 : z->zkmmax*2;
                                        z->zkmvec = krealloc(z->zkmvec,
                                            z->zkmmax * sizeof(z->zkmvec[0]),
                                            M_ZONE, M_WAITOK);
                                }
                                z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
                        }
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        spin_lock(&z->zlock);
        z->ztotal += nitems;
        /*
         * Save one for immediate allocation
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                z->znalloc++;
        } else if (z->zfreecnt > 0) {
                item = z->zitems;
                z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **) item)[1] = NULL;
#endif
                z->zfreecnt--;
                z->znalloc++;
        } else {
                item = NULL;
        }
        spin_unlock(&z->zlock);

        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If
         * so we have to be sure to recover our reserve so we don't run out.
         * We will panic if we run out.
         */
        if (z->zflags & ZONE_SPECIAL)
                vm_map_entry_reserve(0);

        return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        vm_zone_t curzone;
        char tmpbuf[128];
        char tmpname[14];

        ksnprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        lwkt_gettoken(&vm_token);
        LIST_FOREACH(curzone, &zlist, zlink) {
                int i;
                int n;
                int len;
                int offset;
                int freecnt;

                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == LIST_FIRST(&zlist)) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }
                freecnt = curzone->zfreecnt;
                for (n = 0; n < ncpus; ++n)
                        freecnt += curzone->zfreecnt_pcpu[n];

                ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                        "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
                        tmpname, curzone->zsize, curzone->zmax,
                        (curzone->ztotal - freecnt),
                        freecnt, curzone->znalloc);

                len = strlen((char *)tmpbuf);
                if (LIST_NEXT(curzone, zlink) == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);

                if (error)
                        break;
        }
        lwkt_reltoken(&vm_token);
        return (error);
}
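
/*
 * Illustrative userland sketch (not part of the original file): reading
 * the formatted table exported by the handler above via sysctl(3).
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	size_t len;
 *	char *buf;
 *
 *	if (sysctlbyname("vm.zone", NULL, &len, NULL, 0) == 0 &&
 *	    (buf = malloc(len)) != NULL &&
 *	    sysctlbyname("vm.zone", buf, &len, NULL, 0) == 0)
 *		fwrite(buf, 1, len, stdout);
 */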

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
        NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
        CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
        CTLFLAG_RW, &zone_burst, 0, "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
        CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
        CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.