FreeBSD/Linux Kernel Cross Reference
sys/bsd/vm/vnode_pager.c


    1 /*
    2  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
    5  * 
    6  * This file contains Original Code and/or Modifications of Original Code
    7  * as defined in and that are subject to the Apple Public Source License
    8  * Version 2.0 (the 'License'). You may not use this file except in
    9  * compliance with the License. The rights granted to you under the License
   10  * may not be used to create, or enable the creation or redistribution of,
   11  * unlawful or unlicensed copies of an Apple operating system, or to
   12  * circumvent, violate, or enable the circumvention or violation of, any
   13  * terms of an Apple operating system software license agreement.
   14  * 
   15  * Please obtain a copy of the License at
   16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
   17  * 
   18  * The Original Code and all software distributed under the License are
   19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   23  * Please see the License for the specific language governing rights and
   24  * limitations under the License.
   25  * 
   26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
   27  */
   28 /* 
   29  * Mach Operating System
   30  * Copyright (c) 1987 Carnegie-Mellon University
   31  * All rights reserved.  The CMU software License Agreement specifies
   32  * the terms and conditions for use and redistribution.
   33  */
   34 /*
   35  *      File:   vnode_pager.c
   36  *
   37  *      "Swap" pager that pages to/from vnodes.  Also
   38  *      handles demand paging from files.
   39  *
   40  */
   41 
   42 #include <mach/boolean.h>
   43 #include <sys/param.h>
   44 #include <sys/systm.h>
   45 #include <sys/user.h>
   46 #include <sys/proc.h>
   47 #include <sys/kauth.h>
   48 #include <sys/buf.h>
   49 #include <sys/uio.h>
   50 #include <sys/vnode_internal.h>
   51 #include <sys/namei.h>
   52 #include <sys/mount_internal.h> /* needs internal due to fhandle_t */
   53 #include <sys/ubc_internal.h>
   54 #include <sys/lock.h>
   55 
   56 #include <mach/mach_types.h>
   57 #include <mach/memory_object_types.h>
   58 #include <mach/memory_object_control.h>
   59 #include <mach/vm_map.h>
   60 #include <mach/mach_vm.h>
   61 #include <mach/upl.h>
   62 #include <mach/sdt.h>
   63 
   64 #include <vm/vm_map.h>
   65 #include <vm/vm_kern.h>
   66 #include <kern/zalloc.h>
   67 #include <kern/kalloc.h>
   68 #include <libkern/libkern.h>
   69 
   70 #include <vm/vnode_pager.h>
   71 #include <vm/vm_pageout.h>
   72 
   73 #include <kern/assert.h>
   74 #include <sys/kdebug.h>
   75 #include <machine/spl.h>
   76 
   77 #include <nfs/rpcv2.h>
   78 #include <nfs/nfsproto.h>
   79 #include <nfs/nfs.h>
   80 
   81 #include <vm/vm_protos.h>
   82 
   83 
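      /*
       * Return non-zero if the vnode is referenced by something other than
       * the kernel itself, i.e. its use count exceeds the kernel-held
       * use count (v_kusecount).
       */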
   84 uint32_t
   85 vnode_pager_isinuse(struct vnode *vp)
   86 {
   87         if (vp->v_usecount > vp->v_kusecount)
   88                 return (1);
   89         return (0);
   90 }
   91 
   92 uint32_t
   93 vnode_pager_return_hard_throttle_limit(struct vnode *vp, uint32_t *limit, uint32_t hard_throttle)
   94 {
   95         return(cluster_hard_throttle_limit(vp, limit, hard_throttle));
   96 }
   97 
   98 vm_object_offset_t
   99 vnode_pager_get_filesize(struct vnode *vp)
  100 {
  101 
  102         return (vm_object_offset_t) ubc_getsize(vp);
  103 }
  104 
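      /*
       * Copy the vnode's path into the caller-supplied buffer.  On entry
       * '*length_p' gives the buffer size; on success it is updated with
       * the length reported by vn_getpath().  Returns KERN_FAILURE if the
       * path cannot be obtained.
       */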
  105 kern_return_t
  106 vnode_pager_get_pathname(
  107         struct vnode    *vp,
  108         char            *pathname,
  109         vm_size_t       *length_p)
  110 {
  111         int     error, len;
  112 
  113         len = (int) *length_p;
  114         error = vn_getpath(vp, pathname, &len);
  115         if (error != 0) {
  116                 return KERN_FAILURE;
  117         }
  118         *length_p = (vm_size_t) len;
  119         return KERN_SUCCESS;
  120 }
  121 
  122 kern_return_t
  123 vnode_pager_get_filename(
  124         struct vnode    *vp,
  125         const char      **filename)
  126 {
  127         *filename = vp->v_name;
  128         return KERN_SUCCESS;
  129 }
  130 
  131 kern_return_t
  132 vnode_pager_get_cs_blobs(
  133         struct vnode    *vp,
  134         void            **blobs)
  135 {
  136         *blobs = ubc_get_cs_blobs(vp);
  137         return KERN_SUCCESS;
  138 }
  139 
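      /*
       * vnode_pageout:
       *
       * Write the pages covering ['f_offset', 'f_offset' + 'size') back to
       * the vnode via VNOP_PAGEOUT.  If the caller did not supply a UPL,
       * one is created here unless the filesystem handles the range itself
       * (VFC_VFSVNOP_PAGEOUTV2).  Returns PAGER_SUCCESS or PAGER_ERROR;
       * the first errno encountered is passed back through 'errorp'.
       */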
  140 pager_return_t
  141 vnode_pageout(struct vnode *vp,
  142         upl_t                   upl,
  143         upl_offset_t            upl_offset,
  144         vm_object_offset_t      f_offset,
  145         upl_size_t              size,
  146         int                     flags,
  147         int                     *errorp)
  148 {
  149         int             result = PAGER_SUCCESS;
  150         int             error = 0;
  151         int             error_ret = 0;
  152         daddr64_t blkno;
  153         int isize;
  154         int pg_index;
  155         int base_index;
  156         upl_offset_t offset;
  157         upl_page_info_t *pl;
  158         vfs_context_t ctx = vfs_context_current();      /* pager context */
  159 
  160         isize = (int)size;
  161 
  162         if (isize <= 0) {
  163                 result    = PAGER_ERROR;
  164                 error_ret = EINVAL;
  165                 goto out;
  166         }
  167 
  168         if (UBCINFOEXISTS(vp) == 0) {
  169                 result    = PAGER_ERROR;
  170                 error_ret = EINVAL;
  171 
  172                 if (upl && !(flags & UPL_NOCOMMIT))
  173                         ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
  174                 goto out;
  175         }
  176         if ( !(flags & UPL_VNODE_PAGER)) {
  177                 /*
  178                  * This is a pageout from the default pager,
  179                  * just go ahead and call vnop_pageout since
  180                  * it has already sorted out the dirty ranges
  181                  */
  182                 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
  183                                       size, 1, 0, 0, 0);
  184 
  185                 if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
  186                                                (size_t)size, flags, ctx)) )
  187                         result = PAGER_ERROR;
  188 
  189                 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
  190                                       size, 1, 0, 0, 0);
  191 
  192                 goto out;
  193         }
  194         if (upl == NULL) {
  195                 int                     request_flags;
  196 
  197                 if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) {
  198                         /*
  199                          * filesystem has requested the new form of VNOP_PAGEOUT for file
  200                          * backed objects... we will not grab the UPL before calling VNOP_PAGEOUT...
  201                          * it is the filesystem's responsibility to grab the range we're denoting
  202                          * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
  203                          * take any locks it needs, before effectively locking the pages into a UPL...
  204                          */
  205                         KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
  206                                               size, (int)f_offset, 0, 0, 0);
  207 
  208                         if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
  209                                                        size, flags, ctx)) ) {
  210                                 result = PAGER_ERROR;
  211                         }
  212                         KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
  213                                               size, 0, 0, 0, 0);
  214 
  215                         goto out;
  216                 }
  217                 if (flags & UPL_MSYNC)
  218                         request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
  219                 else
  220                         request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
  221                 
  222                 ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags);
  223 
  224                 if (upl == (upl_t)NULL) {
  225                         result    = PAGER_ERROR;
  226                         error_ret = EINVAL;
  227                         goto out;
  228                 }
  229                 upl_offset = 0;
  230         } else 
  231                 pl = ubc_upl_pageinfo(upl);
  232 
  233         /*
  234          * we come here for pageouts to 'real' files and
  235          * for msyncs...  the upl may not contain any
  236          * dirty pages... it's our responsibility to sort
  237          * through it and find the 'runs' of dirty pages
  238          * to call VNOP_PAGEOUT on...
  239          */
  240         if (ubc_getsize(vp) == 0) {
  241                 /*
  242                  * if the file has been effectively deleted, then
  243                  * we need to go through the UPL and invalidate any
  244                  * buffer headers we might have that reference any
  245                  * of its pages
  246                  */
  247                 for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) {
  248 #if NFSCLIENT
  249                         if (vp->v_tag == VT_NFS)
  250                                 /* check with nfs if page is OK to drop */
  251                                 error = nfs_buf_page_inval(vp, (off_t)f_offset);
  252                         else
  253 #endif
  254                         {
  255                                 blkno = ubc_offtoblk(vp, (off_t)f_offset);
  256                                 error = buf_invalblkno(vp, blkno, 0);
  257                         }
  258                         if (error) {
  259                                 if ( !(flags & UPL_NOCOMMIT))
  260                                         ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
  261                                 if (error_ret == 0)
  262                                         error_ret = error;
  263                                 result = PAGER_ERROR;
  264 
  265                         } else if ( !(flags & UPL_NOCOMMIT)) {
  266                                 ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
  267                         }
  268                         f_offset += PAGE_SIZE;
  269                 }
  270                 goto out;
  271         }
  272         /*
  273          * Ignore any non-present pages at the end of the
  274          * UPL so that we aren't looking at a UPL that
  275          * may already have been freed by the preceding
  276          * aborts/completions.
  277          */
  278         base_index = upl_offset / PAGE_SIZE;
  279 
  280         for (pg_index = (upl_offset + isize) / PAGE_SIZE; pg_index > base_index;) {
  281                 if (upl_page_present(pl, --pg_index))
  282                         break;
  283                 if (pg_index == base_index) {
  284                         /*
  285                          * no pages were returned, so release
  286                          * our hold on the upl and leave
  287                          */
  288                         if ( !(flags & UPL_NOCOMMIT))
  289                                 ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY);
  290 
  291                         goto out;
  292                 }
  293         }
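              /*
               * trim 'isize' so that the scan below ends at the last
               * present page and never walks into freed portions of the UPL
               */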
  294         isize = ((pg_index + 1) - base_index) * PAGE_SIZE;
  295 
  296         offset = upl_offset;
  297         pg_index = base_index;
  298 
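              /*
               * walk the UPL one page at a time: skip slots with no page,
               * invalidate any incore buffer for clean (precious) pages and
               * commit them unchanged, and coalesce runs of dirty pages so
               * that each run is pushed with a single VNOP_PAGEOUT
               */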
  299         while (isize) {
  300                 int  xsize;
  301                 int  num_of_pages;
  302 
  303                 if ( !upl_page_present(pl, pg_index)) {
  304                         /*
  305                          * we asked for RET_ONLY_DIRTY, so it's possible
  306                          * to get back empty slots in the UPL...
  307                          * just skip over them
  308                          */
  309                         f_offset += PAGE_SIZE;
  310                         offset   += PAGE_SIZE;
  311                         isize    -= PAGE_SIZE;
  312                         pg_index++;
  313 
  314                         continue;
  315                 }
  316                 if ( !upl_dirty_page(pl, pg_index)) {
  317                         /*
  318                          * if the page is not dirty and we reached here, it is either
  319                          * marked precious or it is being invalidated by a
  320                          * memory_object_lock request as part of truncation.
  321                          * We also get here from vm_object_terminate().
  322                          * In these cases all we need to do is
  323                          * invalidate the incore buffer if it is there.
  324                          * Note we must not sleep here if the buffer is busy - that is
  325                          * a lock inversion which causes deadlock.
  326                          */
  327 #if NFSCLIENT
  328                         if (vp->v_tag == VT_NFS)
  329                                 /* check with nfs if page is OK to drop */
  330                                 error = nfs_buf_page_inval(vp, (off_t)f_offset);
  331                         else
  332 #endif
  333                         {
  334                                 blkno = ubc_offtoblk(vp, (off_t)f_offset);
  335                                 error = buf_invalblkno(vp, blkno, 0);
  336                         }
  337                         if (error) {
  338                                 if ( !(flags & UPL_NOCOMMIT))
  339                                         ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
  340                                 if (error_ret == 0)
  341                                         error_ret = error;
  342                                 result = PAGER_ERROR;
  343 
  344                         } else if ( !(flags & UPL_NOCOMMIT)) {
  345                                 ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
  346                         }
  347                         f_offset += PAGE_SIZE;
  348                         offset   += PAGE_SIZE;
  349                         isize    -= PAGE_SIZE;
  350                         pg_index++;
  351 
  352                         continue;
  353                 }
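                      /*
                       * this page is dirty... count how many of the pages
                       * immediately following it are dirty as well, so the
                       * whole run can go down in one VNOP_PAGEOUT
                       */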
  354                 num_of_pages = 1;
  355                 xsize = isize - PAGE_SIZE;
  356 
  357                 while (xsize) {
  358                         if ( !upl_dirty_page(pl, pg_index + num_of_pages))
  359                                 break;
  360                         num_of_pages++;
  361                         xsize -= PAGE_SIZE;
  362                 }
  363                 xsize = num_of_pages * PAGE_SIZE;
  364 
  365                 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, 
  366                                       xsize, (int)f_offset, 0, 0, 0);
  367 
  368                 if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
  369                                            xsize, flags, ctx)) ) {
  370                         if (error_ret == 0)
  371                                 error_ret = error;
  372                         result = PAGER_ERROR;
  373                 }
  374                 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, 
  375                                       xsize, 0, 0, 0, 0);
  376 
  377                 f_offset += xsize;
  378                 offset   += xsize;
  379                 isize    -= xsize;
  380                 pg_index += num_of_pages;
  381         }
  382 out:
  383         if (errorp)
  384                 *errorp = error_ret;
  385 
  386         return (result);
  387 }
  388 
  389 
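      /*
       * vnode_pagein:
       *
       * Read the file range ['f_offset', 'f_offset' + 'size') into the
       * pages described by 'upl'.  If the caller did not supply a UPL, one
       * is created here unless the filesystem handles the range itself
       * (VFC_VFSVNOP_PAGEINV2).  Runs of present-but-invalid pages are
       * filled via VNOP_PAGEIN; already-valid pages are released back to
       * the cache unchanged.  '*errorp' reports PAGER_SUCCESS, PAGER_ABSENT
       * or PAGER_ERROR.
       */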
  390 pager_return_t
  391 vnode_pagein(
  392         struct vnode            *vp,
  393         upl_t                   upl,
  394         upl_offset_t            upl_offset,
  395         vm_object_offset_t      f_offset,
  396         upl_size_t              size,
  397         int                     flags,
  398         int                     *errorp)
  399 {
  400         struct uthread  *ut;
  401         upl_page_info_t *pl;
  402         int             result = PAGER_SUCCESS;
  403         int             error = 0;
  404         int             pages_in_upl;
  405         int             start_pg;
  406         int             last_pg;
  407         int             first_pg;
  408         int             xsize;
  409         int             must_commit = 1;
  410 
  411         if (flags & UPL_NOCOMMIT)
  412                 must_commit = 0;
  413 
  414         if (UBCINFOEXISTS(vp) == 0) {
  415                 result = PAGER_ERROR;
  416                 error  = PAGER_ERROR;
  417 
  418                 if (upl && must_commit)
  419                         ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
  420 
  421                 goto out;
  422         }
  423         if (upl == (upl_t)NULL) {
  424                 flags &= ~UPL_NOCOMMIT;
  425 
  426                 if (size > (MAX_UPL_SIZE * PAGE_SIZE)) {
  427                         result = PAGER_ERROR;
  428                         error  = PAGER_ERROR;
  429                         goto out;
  430                 }
  431                 if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEINV2) {
  432                         /*
  433                          * filesystem has requested the new form of VNOP_PAGEIN for file
  434                          * backed objects... we will not grab the UPL before calling VNOP_PAGEIN...
  435                          * it is the filesystem's responsibility to grab the range we're denoting
  436                          * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
  437                          * take any locks it needs, before effectively locking the pages into a UPL...
  438                          * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset'
  439                          * is used to identify the "must have" page in the extent... the filesystem is free
  440                          * to clip the extent to better fit the underlying FS blocksize if it desires as 
  441                          * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset'
  442                          * identifies that page
  443                          */
  444                         if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
  445                                                   size, flags, vfs_context_current())) ) {
  446                                 result = PAGER_ERROR;
  447                                 error  = PAGER_ERROR;
  448                         }
  449                         goto out;
  450                 }
  451                 ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);
  452 
  453                 if (upl == (upl_t)NULL) {
  454                         result =  PAGER_ABSENT;
  455                         error = PAGER_ABSENT;
  456                         goto out;
  457                 }
  458                 upl_offset = 0;
  459                 first_pg = 0;
  460                 
  461                 /*
  462                  * if we get here, we've created the upl and
  463                  * are responsible for committing/aborting it
  464                  * regardless of what the caller has passed in
  465                  */
  466                 must_commit = 1;
  467         } else {
  468                 pl = ubc_upl_pageinfo(upl);
  469                 first_pg = upl_offset / PAGE_SIZE;
  470         }
  471         pages_in_upl = size / PAGE_SIZE;
  472         DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL);
  473 
  474         /*
  475          * before we start marching forward, we must make sure we end on 
  476          * a present page, otherwise we will be working with a freed
  477          * upl
  478          */
  479         for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
  480                 if (upl_page_present(pl, last_pg))
  481                         break;
  482                 if (last_pg == first_pg) {
  483                         /*
  484                          * empty UPL, no pages are present
  485                          */
  486                         if (must_commit)
  487                                 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
  488                         goto out;
  489                 }
  490         }
  491         pages_in_upl = last_pg + 1;
  492         last_pg = first_pg;
  493 
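              /*
               * march through the UPL: skip absent slots, release ranges of
               * already-valid pages unchanged, and issue VNOP_PAGEIN for
               * each run of present pages that still need their contents
               */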
  494         while (last_pg < pages_in_upl) {
  495                 /*
  496                  * skip over missing pages...
  497                  */
  498                 for ( ; last_pg < pages_in_upl; last_pg++) {
  499                         if (upl_page_present(pl, last_pg))
  500                                 break;
  501                 }
  502                 /*
  503                  * skip over 'valid' pages... we don't want to issue I/O for these
  504                  */
  505                 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
  506                         if (!upl_valid_page(pl, last_pg))
  507                                 break;
  508                 }
  509                 if (last_pg > start_pg) {
  510                         /*
  511                          * we've found a range of valid pages...
  512                          * if we've got COMMIT responsibility,
  513                          * commit this range of pages back to the
  514                          * cache unchanged
  515                          */
  516                         xsize = (last_pg - start_pg) * PAGE_SIZE;
  517 
  518                         if (must_commit)
  519                                 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
  520                 }
  521                 if (last_pg == pages_in_upl)
  522                         /*
  523                          * we're done... all pages that were present
  524                          * have either had I/O issued on them or 
  525                          * were aborted unchanged...
  526                          */
  527                         break;
  528 
  529                 if (!upl_page_present(pl, last_pg)) {
  530                         /*
  531                          * we found a range of valid pages 
  532                          * terminated by a missing page...
  533                          * bump index to the next page and continue on
  534                          */
  535                         last_pg++;
  536                         continue;
  537                 }
  538                 /*
  539                  * scan from the found invalid page looking for a valid
  540                  * or non-present page before the end of the upl is reached, if we
  541                  * find one, then it will be the last page of the request to
  542                  * 'cluster_io'
  543                  */
  544                 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
  545                         if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
  546                                 break;
  547                 }
  548                 if (last_pg > start_pg) {
  549                         int xoff;
  550                         xsize = (last_pg - start_pg) * PAGE_SIZE;
  551                         xoff  = start_pg * PAGE_SIZE;
  552 
  553                         if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff,
  554                                                (off_t)f_offset + xoff,
  555                                                xsize, flags, vfs_context_current())) ) {
  556                                 /*
  557                                  * Usually this UPL will be aborted/committed by the lower cluster layer.
  558                                  * In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
  559                                  * a deadlock with another thread already inflating the file. In that case,
  560                                  * we must take care of our UPL at this layer itself.
  561                                  */
  562                                 if (must_commit) {
  563                                         if(error == EAGAIN) {
  564                                                 ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
  565                                         }
  566                                 }
  567                                 result = PAGER_ERROR;
  568                                 error  = PAGER_ERROR;
  569 
  570                         }
  571                 }
  572         }
  573 out:
  574         if (errorp)
  575                 *errorp = result;
  576 
  577         ut = get_bsdthread_info(current_thread());
  578 
  579         if (ut->uu_lowpri_window) {
  580                 /*
  581                  * task is marked as a low priority I/O type
  582                  * and the I/O we issued while in this page fault
  583                  * collided with normal I/O operations... we'll
  584                  * delay in order to mitigate the impact of this
  585                  * task on the normal operation of the system
  586                  */
  587                 throttle_lowpri_io(TRUE);
  588         }
  589         return (error);
  590 }
  591 
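      /*
       * vnode_pager_shutdown:
       *
       * Walk the backing-store table and drop the vnode reference taken by
       * macx_swapon() on each swap backing-store vnode.
       */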
  592 void
  593 vnode_pager_shutdown(void)
  594 {
  595         int i;
  596         vnode_t vp;
  597 
  598         for(i = 0; i < MAX_BACKING_STORE; i++) {
  599                 vp = (vnode_t)(bs_port_table[i]).vp;
  600                 if (vp) {
  601                         (bs_port_table[i]).vp = 0;
  602 
  603                         /* get rid of macx_swapon() reference */
  604                         vnode_rele(vp);
  605                 }
  606         }
  607 }
  608 
  609 
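      /*
       * C-callable wrapper around the UPL_GET_INTERNAL_PAGE_LIST() macro.
       */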
  610 void *
  611 upl_get_internal_page_list(upl_t upl)
  612 {
  613   return(UPL_GET_INTERNAL_PAGE_LIST(upl));
  614 
  615 }
