FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_bio.c


    1 /*      $NetBSD: uvm_bio.c,v 1.126 2021/04/01 06:26:26 simonb Exp $     */
    2 
    3 /*
    4  * Copyright (c) 1998 Chuck Silvers.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. The name of the author may not be used to endorse or promote products
   16  *    derived from this software without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  */
   31 
   32 /*
   33  * uvm_bio.c: buffered i/o object mapping cache
   34  */
   35 
   36 #include <sys/cdefs.h>
   37 __KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.126 2021/04/01 06:26:26 simonb Exp $");
   38 
   39 #include "opt_uvmhist.h"
   40 #include "opt_ubc.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kmem.h>
   45 #include <sys/kernel.h>
   46 #include <sys/proc.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/vnode.h>
   49 #include <sys/bitops.h>         /* for ilog2() */
   50 
   51 #include <uvm/uvm.h>
   52 #include <uvm/uvm_pdpolicy.h>
   53 
   54 #ifdef PMAP_DIRECT
   55 #  define UBC_USE_PMAP_DIRECT
   56 #endif
   57 
   58 /*
   59  * local functions
   60  */
   61 
   62 static int      ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
   63                           int, int, vm_prot_t, int);
   64 static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
   65 static int      ubchash_stats(struct hashstat_sysctl *hs, bool fill);
   66 #ifdef UBC_USE_PMAP_DIRECT
   67 static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
   68                           int, int);
   69 static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);
   70 
   71 /* XXX disabled by default until the kinks are worked out. */
   72 bool ubc_direct = false;
   73 #endif
   74 
   75 /*
    76  * local data structures
   77  */
   78 
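       /*
        * UBC_HASH maps a (uobj, offset) pair to a bucket in ubc_object.hash:
        * the (shifted) object pointer and the page number of the offset are
        * summed and the result masked with ubc_object.hashmask.
        */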
   79 #define UBC_HASH(uobj, offset)                                          \
   80         (((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
   81                                 ubc_object.hashmask)
   82 
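       /*
        * UBC_QUEUE selects the inactive queue for a file offset, using the
        * offset in window-sized units masked with UBC_NQUEUES - 1.
        */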
   83 #define UBC_QUEUE(offset)                                               \
   84         (&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &    \
   85                              (UBC_NQUEUES - 1)])
   86 
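       /*
        * UBC_UMAP_ADDR is the kernel virtual address of the mapping window
        * belonging to ubc_map "u"; windows are laid out contiguously from
        * ubc_object.kva, one per entry of the umap array.
        */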
   87 #define UBC_UMAP_ADDR(u)                                                \
   88         (vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
   89 
   90 
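       /*
        * ubc_map flag bits:  UMAP_PAGES_LOCKED means the window's pages were
        * faulted in up front (UBC_FAULTBUSY) and entered with pmap_kenter_pa()
        * while held busy; UMAP_MAPPING_CACHED means the window still carries
        * pmap mappings left over from a previous use.
        */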
   91 #define UMAP_PAGES_LOCKED       0x0001
   92 #define UMAP_MAPPING_CACHED     0x0002
   93 
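       /*
        * A ubc_map describes one mapping window: a window-aligned,
        * ubc_winsize-byte range of a uvm_object that is (or recently was)
        * mapped at UBC_UMAP_ADDR(umap) within the ubc_object region.
        */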
   94 struct ubc_map {
   95         struct uvm_object *     uobj;           /* mapped object */
   96         voff_t                  offset;         /* offset into uobj */
   97         voff_t                  writeoff;       /* write offset */
   98         vsize_t                 writelen;       /* write len */
   99         int                     refcount;       /* refcount on mapping */
  100         int                     flags;          /* extra state */
  101         int                     advice;
  102 
  103         LIST_ENTRY(ubc_map)     hash;           /* hash table */
  104         TAILQ_ENTRY(ubc_map)    inactive;       /* inactive queue */
  105         LIST_ENTRY(ubc_map)     list;           /* per-object list */
  106 };
  107 
  108 TAILQ_HEAD(ubc_inactive_head, ubc_map);
  109 static struct ubc_object {
  110         struct uvm_object uobj;         /* glue for uvm_map() */
  111         char *kva;                      /* where ubc_object is mapped */
  112         struct ubc_map *umap;           /* array of ubc_map's */
  113 
  114         LIST_HEAD(, ubc_map) *hash;     /* hashtable for cached ubc_map's */
  115         u_long hashmask;                /* mask for hashtable */
  116 
  117         struct ubc_inactive_head *inactive;
  118                                         /* inactive queues for ubc_map's */
  119 } ubc_object;
  120 
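       /*
        * Pager operations for ubc_object: page faults taken on the window
        * region are resolved by ubc_fault(); no other pager methods are
        * needed.
        */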
  121 const struct uvm_pagerops ubc_pager = {
  122         .pgo_fault = ubc_fault,
  123         /* ... rest are NULL */
  124 };
  125 
   126 /* Use a value at least as big as the maximum page size supported by the architecture. */
  127 #define UBC_MAX_WINSHIFT        \
  128     ((1 << UBC_WINSHIFT) > MAX_PAGE_SIZE ? UBC_WINSHIFT : ilog2(MAX_PAGE_SIZE))
  129 
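       /*
        * The cache consists of ubc_nwins mapping windows, each covering
        * ubc_winsize (1 << ubc_winshift) bytes of kernel virtual address
        * space.
        */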
  130 int ubc_nwins = UBC_NWINS;
  131 const int ubc_winshift = UBC_MAX_WINSHIFT;
  132 const int ubc_winsize = 1 << UBC_MAX_WINSHIFT;
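
       /*
        * With PMAP_PREFER, inactive windows are kept on several queues, one
        * per preferred virtual alignment, so that ubc_alloc() tends to reuse
        * a window whose address is cache-compatible with the requested
        * file offset.
        */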
  133 #if defined(PMAP_PREFER)
  134 int ubc_nqueues;
  135 #define UBC_NQUEUES ubc_nqueues
  136 #else
  137 #define UBC_NQUEUES 1
  138 #endif
  139 
  140 #if defined(UBC_STATS)
  141 
  142 #define UBC_EVCNT_DEFINE(name) \
  143 struct evcnt ubc_evcnt_##name = \
  144 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
  145 EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
  146 #define UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++
  147 
  148 #else /* defined(UBC_STATS) */
  149 
  150 #define UBC_EVCNT_DEFINE(name)  /* nothing */
  151 #define UBC_EVCNT_INCR(name)    /* nothing */
  152 
  153 #endif /* defined(UBC_STATS) */
  154 
  155 UBC_EVCNT_DEFINE(wincachehit)
  156 UBC_EVCNT_DEFINE(wincachemiss)
  157 UBC_EVCNT_DEFINE(faultbusy)
  158 
  159 /*
  160  * ubc_init
  161  *
  162  * init pager private data structures.
  163  */
  164 
  165 void
  166 ubc_init(void)
  167 {
  168         /*
  169          * Make sure ubc_winshift is sane.
  170          */
  171         KASSERT(ubc_winshift >= PAGE_SHIFT);
  172 
  173         /*
  174          * init ubc_object.
  175          * alloc and init ubc_map's.
  176          * init inactive queues.
  177          * alloc and init hashtable.
  178          * map in ubc_object.
  179          */
  180 
  181         uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);
  182 
  183         ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
  184             KM_SLEEP);
  185         if (ubc_object.umap == NULL)
  186                 panic("ubc_init: failed to allocate ubc_map");
  187 
  188         vaddr_t va = (vaddr_t)1L;
  189 #ifdef PMAP_PREFER
  190         PMAP_PREFER(0, &va, 0, 0);      /* kernel is never topdown */
  191         ubc_nqueues = va >> ubc_winshift;
  192         if (ubc_nqueues == 0) {
  193                 ubc_nqueues = 1;
  194         }
  195 #endif
  196         ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
  197             sizeof(struct ubc_inactive_head), KM_SLEEP);
  198         for (int i = 0; i < UBC_NQUEUES; i++) {
  199                 TAILQ_INIT(&ubc_object.inactive[i]);
  200         }
  201         for (int i = 0; i < ubc_nwins; i++) {
  202                 struct ubc_map *umap;
  203                 umap = &ubc_object.umap[i];
  204                 TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
  205                                   umap, inactive);
  206         }
  207 
  208         ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
  209             &ubc_object.hashmask);
  210         for (int i = 0; i <= ubc_object.hashmask; i++) {
  211                 LIST_INIT(&ubc_object.hash[i]);
  212         }
  213 
  214         if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
  215                     ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
  216                     UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
  217                                 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
  218                 panic("ubc_init: failed to map ubc_object");
  219         }
  220 
  221         hashstat_register("ubchash", ubchash_stats);
  222 }
  223 
  224 void
  225 ubchist_init(void)
  226 {
  227 
  228         UVMHIST_INIT(ubchist, 300);
  229 }
  230 
  231 /*
  232  * ubc_fault_page: helper of ubc_fault to handle a single page.
  233  *
  234  * => Caller has UVM object locked.
  235  * => Caller will perform pmap_update().
  236  */
  237 
  238 static inline int
  239 ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
  240     struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
  241 {
  242         vm_prot_t mask;
  243         int error;
  244         bool rdonly;
  245 
  246         KASSERT(rw_write_held(pg->uobject->vmobjlock));
  247 
  248         KASSERT((pg->flags & PG_FAKE) == 0);
  249         if (pg->flags & PG_RELEASED) {
  250                 uvm_pagefree(pg);
  251                 return 0;
  252         }
  253         if (pg->loan_count != 0) {
  254 
  255                 /*
  256                  * Avoid unneeded loan break, if possible.
  257                  */
  258 
  259                 if ((access_type & VM_PROT_WRITE) == 0) {
  260                         prot &= ~VM_PROT_WRITE;
  261                 }
  262                 if (prot & VM_PROT_WRITE) {
  263                         struct vm_page *newpg;
  264 
  265                         newpg = uvm_loanbreak(pg);
  266                         if (newpg == NULL) {
  267                                 uvm_page_unbusy(&pg, 1);
  268                                 return ENOMEM;
  269                         }
  270                         pg = newpg;
  271                 }
  272         }
  273 
  274         /*
  275          * Note that a page whose backing store is partially allocated
  276          * is marked as PG_RDONLY.
  277          *
   278          * It is the responsibility of ubc_alloc()'s caller to allocate
   279          * the backing blocks before writing to the window.
  280          */
  281 
  282         KASSERT((pg->flags & PG_RDONLY) == 0 ||
  283             (access_type & VM_PROT_WRITE) == 0 ||
  284             pg->offset < umap->writeoff ||
  285             pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
  286 
  287         rdonly = uvm_pagereadonly_p(pg);
  288         mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
  289 
  290         error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
  291             prot & mask, PMAP_CANFAIL | (access_type & mask));
  292 
  293         uvm_pagelock(pg);
  294         uvm_pageactivate(pg);
  295         uvm_pagewakeup(pg);
  296         uvm_pageunlock(pg);
  297         pg->flags &= ~PG_BUSY;
  298         UVM_PAGE_OWN(pg, NULL);
  299 
  300         return error;
  301 }
  302 
  303 /*
  304  * ubc_fault: fault routine for ubc mapping
  305  */
  306 
  307 static int
  308 ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
  309     int ign3, int ign4, vm_prot_t access_type, int flags)
  310 {
  311         struct uvm_object *uobj;
  312         struct ubc_map *umap;
  313         vaddr_t va, eva, ubc_offset, slot_offset;
  314         struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
  315         int i, error, npages;
  316         vm_prot_t prot;
  317 
  318         UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
  319 
  320         /*
  321          * no need to try with PGO_LOCKED...
  322          * we don't need to have the map locked since we know that
  323          * no one will mess with it until our reference is released.
  324          */
  325 
  326         if (flags & PGO_LOCKED) {
  327                 uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
  328                 flags &= ~PGO_LOCKED;
  329         }
  330 
  331         va = ufi->orig_rvaddr;
  332         ubc_offset = va - (vaddr_t)ubc_object.kva;
  333         umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
  334         KASSERT(umap->refcount != 0);
  335         KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
  336         slot_offset = ubc_offset & (ubc_winsize - 1);
  337 
  338         /*
  339          * some platforms cannot write to individual bytes atomically, so
  340          * software has to do read/modify/write of larger quantities instead.
  341          * this means that the access_type for "write" operations
  342          * can be VM_PROT_READ, which confuses us mightily.
  343          *
  344          * deal with this by resetting access_type based on the info
  345          * that ubc_alloc() stores for us.
  346          */
  347 
  348         access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
  349         UVMHIST_LOG(ubchist, "va %#jx ubc_offset %#jx access_type %jd",
  350             va, ubc_offset, access_type, 0);
  351 
  352         if ((access_type & VM_PROT_WRITE) != 0) {
  353 #ifndef PRIxOFF         /* XXX */
  354 #define PRIxOFF "jx"    /* XXX */
  355 #endif                  /* XXX */
  356                 KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
  357                     "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
  358                     slot_offset, (intmax_t)umap->writeoff);
  359                 KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
  360                     "out of range write: slot=%#"PRIxVADDR
  361                         " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
  362                     slot_offset, (intmax_t)umap->writeoff, umap->writelen);
  363         }
  364 
  365         /* no umap locking needed since we have a ref on the umap */
  366         uobj = umap->uobj;
  367 
  368         if ((access_type & VM_PROT_WRITE) == 0) {
  369                 npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
  370         } else {
  371                 npages = (round_page(umap->offset + umap->writeoff +
  372                     umap->writelen) - (umap->offset + slot_offset))
  373                     >> PAGE_SHIFT;
  374                 flags |= PGO_PASTEOF;
  375         }
  376 
  377 again:
  378         memset(pgs, 0, sizeof (pgs));
  379         rw_enter(uobj->vmobjlock, RW_WRITER);
  380 
  381         UVMHIST_LOG(ubchist, "slot_offset %#jx writeoff %#jx writelen %#jx ",
  382             slot_offset, umap->writeoff, umap->writelen, 0);
  383         UVMHIST_LOG(ubchist, "getpages uobj %#jx offset %#jx npages %jd",
  384             (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);
  385 
  386         error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
  387             &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
  388             PGO_NOTIMESTAMP);
  389         UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
  390             0);
  391 
  392         if (error == EAGAIN) {
  393                 kpause("ubc_fault", false, hz >> 2, NULL);
  394                 goto again;
  395         }
  396         if (error) {
  397                 return error;
  398         }
  399 
  400         /*
  401          * For virtually-indexed, virtually-tagged caches we should avoid
  402          * creating writable mappings when we do not absolutely need them,
  403          * since the "compatible alias" trick does not work on such caches.
  404          * Otherwise, we can always map the pages writable.
  405          */
  406 
  407 #ifdef PMAP_CACHE_VIVT
  408         prot = VM_PROT_READ | access_type;
  409 #else
  410         prot = VM_PROT_READ | VM_PROT_WRITE;
  411 #endif
  412 
  413         va = ufi->orig_rvaddr;
  414         eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
  415 
  416         UVMHIST_LOG(ubchist, "va %#jx eva %#jx", va, eva, 0, 0);
  417 
  418         /*
  419          * Note: normally all returned pages would have the same UVM object.
   420          * However, layered file systems and e.g. tmpfs may return pages
   421          * which belong to an underlying UVM object.  In such a case, the
   422          * lock is shared amongst the objects.
  423          */
  424         rw_enter(uobj->vmobjlock, RW_WRITER);
  425         for (i = 0; va < eva; i++, va += PAGE_SIZE) {
  426                 struct vm_page *pg;
  427 
  428                 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
  429                     0, 0);
  430                 pg = pgs[i];
  431 
  432                 if (pg == NULL || pg == PGO_DONTCARE) {
  433                         continue;
  434                 }
  435                 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
  436                 error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
  437                 if (error) {
  438                         /*
  439                          * Flush (there might be pages entered), drop the lock,
  440                          * and perform uvm_wait().  Note: page will re-fault.
  441                          */
  442                         pmap_update(ufi->orig_map->pmap);
  443                         rw_exit(uobj->vmobjlock);
  444                         uvm_wait("ubc_fault");
  445                         rw_enter(uobj->vmobjlock, RW_WRITER);
  446                 }
  447         }
  448         /* Must make VA visible before the unlock. */
  449         pmap_update(ufi->orig_map->pmap);
  450         rw_exit(uobj->vmobjlock);
  451 
  452         return 0;
  453 }
  454 
  455 /*
  456  * local functions
  457  */
  458 
  459 static struct ubc_map *
  460 ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
  461 {
  462         struct ubc_map *umap;
  463 
  464         LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
  465                 if (umap->uobj == uobj && umap->offset == offset) {
  466                         return umap;
  467                 }
  468         }
  469         return NULL;
  470 }
  471 
  472 
  473 /*
  474  * ubc interface functions
  475  */
  476 
  477 /*
  478  * ubc_alloc:  allocate a file mapping window
  479  */
  480 
  481 static void * __noinline
  482 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
  483     int flags, struct vm_page **pgs, int *npagesp)
  484 {
  485         vaddr_t slot_offset, va;
  486         struct ubc_map *umap;
  487         voff_t umap_offset;
  488         int error;
  489         UVMHIST_FUNC(__func__);
  490         UVMHIST_CALLARGS(ubchist, "uobj %#jx offset %#jx len %#jx",
  491             (uintptr_t)uobj, offset, *lenp, 0);
  492 
  493         KASSERT(*lenp > 0);
  494         umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
  495         slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
  496         *lenp = MIN(*lenp, ubc_winsize - slot_offset);
  497         KASSERT(*lenp > 0);
  498 
  499         rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
  500 again:
  501         /*
  502          * The UVM object is already referenced.
  503          * Lock order: UBC object -> ubc_map::uobj.
  504          */
  505         umap = ubc_find_mapping(uobj, umap_offset);
  506         if (umap == NULL) {
  507                 struct uvm_object *oobj;
  508 
  509                 UBC_EVCNT_INCR(wincachemiss);
  510                 umap = TAILQ_FIRST(UBC_QUEUE(offset));
  511                 if (umap == NULL) {
  512                         rw_exit(ubc_object.uobj.vmobjlock);
  513                         kpause("ubc_alloc", false, hz >> 2, NULL);
  514                         rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
  515                         goto again;
  516                 }
  517 
  518                 va = UBC_UMAP_ADDR(umap);
  519                 oobj = umap->uobj;
  520 
  521                 /*
  522                  * Remove from old hash (if any), add to new hash.
  523                  */
  524 
  525                 if (oobj != NULL) {
  526                         /*
  527                          * Mapping must be removed before the list entry,
  528                          * since there is a race with ubc_purge().
  529                          */
  530                         if (umap->flags & UMAP_MAPPING_CACHED) {
  531                                 umap->flags &= ~UMAP_MAPPING_CACHED;
  532                                 rw_enter(oobj->vmobjlock, RW_WRITER);
  533                                 pmap_remove(pmap_kernel(), va,
  534                                     va + ubc_winsize);
  535                                 pmap_update(pmap_kernel());
  536                                 rw_exit(oobj->vmobjlock);
  537                         }
  538                         LIST_REMOVE(umap, hash);
  539                         LIST_REMOVE(umap, list);
  540                 } else {
  541                         KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
  542                 }
  543                 umap->uobj = uobj;
  544                 umap->offset = umap_offset;
  545                 LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
  546                     umap, hash);
  547                 LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
  548         } else {
  549                 UBC_EVCNT_INCR(wincachehit);
  550                 va = UBC_UMAP_ADDR(umap);
  551         }
  552 
  553         if (umap->refcount == 0) {
  554                 TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
  555         }
  556 
  557         if (flags & UBC_WRITE) {
  558                 KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
  559                     "ubc_alloc: concurrent writes to uobj %p", uobj);
  560                 umap->writeoff = slot_offset;
  561                 umap->writelen = *lenp;
  562         }
  563 
  564         umap->refcount++;
  565         umap->advice = advice;
  566         rw_exit(ubc_object.uobj.vmobjlock);
  567         UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
  568             (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);
  569 
  570         if (flags & UBC_FAULTBUSY) {
  571                 int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
  572                     PAGE_SIZE - 1) >> PAGE_SHIFT;
  573                 int gpflags =
  574                     PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
  575                     PGO_NOTIMESTAMP;
  576                 int i;
  577                 KDASSERT(flags & UBC_WRITE);
  578                 KASSERT(npages <= *npagesp);
  579                 KASSERT(umap->refcount == 1);
  580 
  581                 UBC_EVCNT_INCR(faultbusy);
  582 again_faultbusy:
  583                 rw_enter(uobj->vmobjlock, RW_WRITER);
  584                 if (umap->flags & UMAP_MAPPING_CACHED) {
  585                         umap->flags &= ~UMAP_MAPPING_CACHED;
  586                         pmap_remove(pmap_kernel(), va, va + ubc_winsize);
  587                 }
  588                 memset(pgs, 0, *npagesp * sizeof(pgs[0]));
  589 
  590                 error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
  591                     &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
  592                 UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
  593                 if (error) {
  594                         /*
  595                          * Flush: the mapping above might have been removed.
  596                          */
  597                         pmap_update(pmap_kernel());
  598                         goto out;
  599                 }
  600                 for (i = 0; i < npages; i++) {
  601                         struct vm_page *pg = pgs[i];
  602 
  603                         KASSERT(pg->uobject == uobj);
  604                         if (pg->loan_count != 0) {
  605                                 rw_enter(uobj->vmobjlock, RW_WRITER);
  606                                 if (pg->loan_count != 0) {
  607                                         pg = uvm_loanbreak(pg);
  608                                 }
  609                                 if (pg == NULL) {
  610                                         pmap_kremove(va, ubc_winsize);
  611                                         pmap_update(pmap_kernel());
  612                                         uvm_page_unbusy(pgs, npages);
  613                                         rw_exit(uobj->vmobjlock);
  614                                         uvm_wait("ubc_alloc");
  615                                         goto again_faultbusy;
  616                                 }
  617                                 rw_exit(uobj->vmobjlock);
  618                                 pgs[i] = pg;
  619                         }
  620                         pmap_kenter_pa(
  621                             va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
  622                             VM_PAGE_TO_PHYS(pg),
  623                             VM_PROT_READ | VM_PROT_WRITE, 0);
  624                 }
  625                 pmap_update(pmap_kernel());
  626                 umap->flags |= UMAP_PAGES_LOCKED;
  627                 *npagesp = npages;
  628         } else {
  629                 KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
  630         }
  631 
  632 out:
  633         return (void *)(va + slot_offset);
  634 }
  635 
  636 /*
  637  * ubc_release:  free a file mapping window.
  638  */
  639 
  640 static void __noinline
  641 ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
  642 {
  643         struct ubc_map *umap;
  644         struct uvm_object *uobj;
  645         vaddr_t umapva;
  646         bool unmapped;
  647         UVMHIST_FUNC(__func__);
  648         UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
  649 
  650         umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
  651         umapva = UBC_UMAP_ADDR(umap);
  652         uobj = umap->uobj;
  653         KASSERT(uobj != NULL);
  654 
  655         if (umap->flags & UMAP_PAGES_LOCKED) {
  656                 const voff_t endoff = umap->writeoff + umap->writelen;
  657                 const voff_t zerolen = round_page(endoff) - endoff;
  658 
  659                 KASSERT(npages == (round_page(endoff) -
  660                     trunc_page(umap->writeoff)) >> PAGE_SHIFT);
  661                 KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
  662                 if (zerolen) {
  663                         memset((char *)umapva + endoff, 0, zerolen);
  664                 }
  665                 umap->flags &= ~UMAP_PAGES_LOCKED;
  666                 rw_enter(uobj->vmobjlock, RW_WRITER);
  667                 for (u_int i = 0; i < npages; i++) {
  668                         struct vm_page *pg = pgs[i];
  669 #ifdef DIAGNOSTIC
  670                         paddr_t pa;
  671                         bool rv;
  672                         rv = pmap_extract(pmap_kernel(), umapva +
  673                             umap->writeoff + (i << PAGE_SHIFT), &pa);
  674                         KASSERT(rv);
  675                         KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
  676 #endif
  677                         pg->flags &= ~PG_FAKE;
  678                         KASSERTMSG(uvm_pagegetdirty(pg) ==
  679                             UVM_PAGE_STATUS_DIRTY,
  680                             "page %p not dirty", pg);
  681                         KASSERT(pg->loan_count == 0);
  682                         if (uvmpdpol_pageactivate_p(pg)) {
  683                                 uvm_pagelock(pg);
  684                                 uvm_pageactivate(pg);
  685                                 uvm_pageunlock(pg);
  686                         }
  687                 }
  688                 pmap_kremove(umapva, ubc_winsize);
  689                 pmap_update(pmap_kernel());
  690                 uvm_page_unbusy(pgs, npages);
  691                 rw_exit(uobj->vmobjlock);
  692                 unmapped = true;
  693         } else {
  694                 unmapped = false;
  695         }
  696 
  697         rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
  698         umap->writeoff = 0;
  699         umap->writelen = 0;
  700         umap->refcount--;
  701         if (umap->refcount == 0) {
  702                 if (flags & UBC_UNMAP) {
  703                         /*
  704                          * Invalidate any cached mappings if requested.
  705                          * This is typically used to avoid leaving
  706                          * incompatible cache aliases around indefinitely.
  707                          */
  708                         rw_enter(uobj->vmobjlock, RW_WRITER);
  709                         pmap_remove(pmap_kernel(), umapva,
  710                                     umapva + ubc_winsize);
  711                         pmap_update(pmap_kernel());
  712                         rw_exit(uobj->vmobjlock);
  713 
  714                         umap->flags &= ~UMAP_MAPPING_CACHED;
  715                         LIST_REMOVE(umap, hash);
  716                         LIST_REMOVE(umap, list);
  717                         umap->uobj = NULL;
  718                         TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
  719                             inactive);
  720                 } else {
  721                         if (!unmapped) {
  722                                 umap->flags |= UMAP_MAPPING_CACHED;
  723                         }
  724                         TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
  725                             inactive);
  726                 }
  727         }
  728         UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
  729             umap->refcount, 0, 0);
  730         rw_exit(ubc_object.uobj.vmobjlock);
  731 }
  732 
  733 /*
  734  * ubc_uiomove: move data to/from an object.
  735  */
  736 
  737 int
  738 ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
  739     int flags)
  740 {
  741         const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
  742         struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
  743         voff_t off;
  744         int error, npages;
  745 
  746         KASSERT(todo <= uio->uio_resid);
  747         KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
  748             ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
  749 
  750 #ifdef UBC_USE_PMAP_DIRECT
  751         /*
   752          * during direct access, pages need to be held busy to prevent them
   753          * from changing identity; therefore, reading or writing an object
   754          * into a mapped view of that same object could deadlock while faulting.
  755          *
  756          * avoid the problem by disallowing direct access if the object
  757          * might be visible somewhere via mmap().
  758          *
  759          * XXX concurrent reads cause thundering herd issues with PG_BUSY.
  760          * In the future enable by default for writes or if ncpu<=2, and
  761          * make the toggle override that.
  762          */
  763         if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
  764             (flags & UBC_FAULTBUSY) != 0) {
  765                 return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
  766         }
  767 #endif
  768 
  769         off = uio->uio_offset;
  770         error = 0;
  771         while (todo > 0) {
  772                 vsize_t bytelen = todo;
  773                 void *win;
  774 
  775                 npages = __arraycount(pgs);
  776                 win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
  777                     &npages);
  778                 if (error == 0) {
  779                         error = uiomove(win, bytelen, uio);
  780                 }
  781                 if (error != 0 && overwrite) {
  782                         /*
  783                          * if we haven't initialized the pages yet,
  784                          * do it now.  it's safe to use memset here
  785                          * because we just mapped the pages above.
  786                          */
  787                         memset(win, 0, bytelen);
  788                 }
  789                 ubc_release(win, flags, pgs, npages);
  790                 off += bytelen;
  791                 todo -= bytelen;
  792                 if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
  793                         break;
  794                 }
  795         }
  796 
  797         return error;
  798 }
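
       /*
        * Example (editorial sketch, not part of the original source): a file
        * system read path would typically call ubc_uiomove() on the vnode's
        * pager object for each chunk to be copied, e.g.:
        *
        *      vsize_t bytelen = MIN(uio->uio_resid,
        *          file_size - uio->uio_offset);
        *      error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
        *          UVM_ADV_SEQUENTIAL, UBC_READ | UBC_PARTIALOK);
        *
        * "vp", "file_size" and the advice value are illustrative assumptions.
        */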
  799 
  800 /*
  801  * ubc_zerorange: set a range of bytes in an object to zero.
  802  */
  803 
  804 void
  805 ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
  806 {
  807         struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
  808         int npages;
  809 
  810 #ifdef UBC_USE_PMAP_DIRECT
  811         if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
  812                 ubc_zerorange_direct(uobj, off, len, flags);
  813                 return;
  814         }
  815 #endif
  816 
  817         /*
  818          * XXXUBC invent kzero() and use it
  819          */
  820 
  821         while (len) {
  822                 void *win;
  823                 vsize_t bytelen = len;
  824 
  825                 npages = __arraycount(pgs);
  826                 win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
  827                     pgs, &npages);
  828                 memset(win, 0, bytelen);
  829                 ubc_release(win, flags, pgs, npages);
  830 
  831                 off += bytelen;
  832                 len -= bytelen;
  833         }
  834 }
  835 
  836 #ifdef UBC_USE_PMAP_DIRECT
   837 /* Copy data using the direct map. */
  838 
  839 /*
  840  * ubc_alloc_direct:  allocate a file mapping window using direct map
  841  */
  842 static int __noinline
  843 ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
  844     int advice, int flags, struct vm_page **pgs, int *npages)
  845 {
  846         voff_t pgoff;
  847         int error;
  848         int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
  849         int access_type = VM_PROT_READ;
  850         UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
  851 
  852         if (flags & UBC_WRITE) {
  853                 if (flags & UBC_FAULTBUSY)
  854                         gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
  855 #if 0
  856                 KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
  857 #endif
  858 
  859                 /*
  860                  * Tell genfs_getpages() we already have the journal lock,
  861                  * allow allocation past current EOF.
  862                  */
  863                 gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
  864                 access_type |= VM_PROT_WRITE;
  865         } else {
  866                 /* Don't need the empty blocks allocated, PG_RDONLY is okay */
  867                 gpflags |= PGO_NOBLOCKALLOC;
  868         }
  869 
  870         pgoff = (offset & PAGE_MASK);
  871         *lenp = MIN(*lenp, ubc_winsize - pgoff);
  872 
  873 again:
  874         *npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
  875         KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
  876         KASSERT(*lenp + pgoff <= ubc_winsize);
  877         memset(pgs, 0, *npages * sizeof(pgs[0]));
  878 
  879         rw_enter(uobj->vmobjlock, RW_WRITER);
  880         error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
  881             npages, 0, access_type, advice, gpflags);
  882         UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
  883         if (error) {
  884                 if (error == EAGAIN) {
  885                         kpause("ubc_alloc_directg", false, hz >> 2, NULL);
  886                         goto again;
  887                 }
  888                 return error;
  889         }
  890 
  891         rw_enter(uobj->vmobjlock, RW_WRITER);
  892         for (int i = 0; i < *npages; i++) {
  893                 struct vm_page *pg = pgs[i];
  894 
  895                 KASSERT(pg != NULL);
  896                 KASSERT(pg != PGO_DONTCARE);
  897                 KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
  898                 KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);
  899 
  900                 /* Avoid breaking loan if possible, only do it on write */
  901                 if ((flags & UBC_WRITE) && pg->loan_count != 0) {
  902                         pg = uvm_loanbreak(pg);
  903                         if (pg == NULL) {
  904                                 uvm_page_unbusy(pgs, *npages);
  905                                 rw_exit(uobj->vmobjlock);
  906                                 uvm_wait("ubc_alloc_directl");
  907                                 goto again;
  908                         }
  909                         pgs[i] = pg;
  910                 }
  911 
  912                 /* Page must be writable by now */
  913                 KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);
  914 
  915                 /*
  916                  * XXX For aobj pages.  No managed mapping - mark the page
  917                  * dirty.
  918                  */
  919                 if ((flags & UBC_WRITE) != 0) {
  920                         uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
  921                 }
  922         }
  923         rw_exit(uobj->vmobjlock);
  924 
  925         return 0;
  926 }
  927 
  928 static void __noinline
  929 ubc_direct_release(struct uvm_object *uobj,
  930         int flags, struct vm_page **pgs, int npages)
  931 {
  932         rw_enter(uobj->vmobjlock, RW_WRITER);
  933         for (int i = 0; i < npages; i++) {
  934                 struct vm_page *pg = pgs[i];
  935 
  936                 pg->flags &= ~PG_BUSY;
  937                 UVM_PAGE_OWN(pg, NULL);
  938                 if (pg->flags & PG_RELEASED) {
  939                         pg->flags &= ~PG_RELEASED;
  940                         uvm_pagefree(pg);
  941                         continue;
  942                 }
  943 
  944                 if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
  945                         uvm_pagelock(pg);
  946                         uvm_pageactivate(pg);
  947                         uvm_pagewakeup(pg);
  948                         uvm_pageunlock(pg);
  949                 }
  950 
   951                 /* The page was changed: it is no longer fake nor clean. */
  952                 if (flags & UBC_WRITE) {
  953                         KASSERTMSG(uvm_pagegetdirty(pg) ==
  954                             UVM_PAGE_STATUS_DIRTY,
  955                             "page %p not dirty", pg);
  956                         pg->flags &= ~PG_FAKE;
  957                 }
  958         }
  959         rw_exit(uobj->vmobjlock);
  960 }
  961 
  962 static int
  963 ubc_uiomove_process(void *win, size_t len, void *arg)
  964 {
  965         struct uio *uio = (struct uio *)arg;
  966 
  967         return uiomove(win, len, uio);
  968 }
  969 
  970 static int
  971 ubc_zerorange_process(void *win, size_t len, void *arg)
  972 {
  973         memset(win, 0, len);
  974         return 0;
  975 }
  976 
  977 static int __noinline
  978 ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
  979     int flags)
  980 {
  981         const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
  982         voff_t off;
  983         int error, npages;
  984         struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
  985 
  986         KASSERT(todo <= uio->uio_resid);
  987         KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
  988             ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));
  989 
  990         off = uio->uio_offset;
  991         error = 0;
  992         while (todo > 0) {
  993                 vsize_t bytelen = todo;
  994 
  995                 error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
  996                     pgs, &npages);
  997                 if (error != 0) {
  998                         /* can't do anything, failed to get the pages */
  999                         break;
 1000                 }
 1001 
 1002                 if (error == 0) {
 1003                         error = uvm_direct_process(pgs, npages, off, bytelen,
 1004                             ubc_uiomove_process, uio);
 1005                 }
 1006 
 1007                 if (overwrite) {
 1008                         voff_t endoff;
 1009 
 1010                         /*
 1011                          * if we haven't initialized the pages yet due to an
 1012                          * error above, do it now.
 1013                          */
 1014                         if (error != 0) {
 1015                                 (void) uvm_direct_process(pgs, npages, off,
 1016                                     bytelen, ubc_zerorange_process, NULL);
 1017                         }
 1018 
 1019                         off += bytelen;
 1020                         todo -= bytelen;
 1021                         endoff = off & (PAGE_SIZE - 1);
 1022 
 1023                         /*
 1024                          * zero out the remaining portion of the final page
 1025                          * (if any).
 1026                          */
 1027                         if (todo == 0 && endoff != 0) {
 1028                                 vsize_t zlen = PAGE_SIZE - endoff;
 1029                                 (void) uvm_direct_process(pgs + npages - 1, 1,
 1030                                     off, zlen, ubc_zerorange_process, NULL);
 1031                         }
 1032                 } else {
 1033                         off += bytelen;
 1034                         todo -= bytelen;
 1035                 }
 1036 
 1037                 ubc_direct_release(uobj, flags, pgs, npages);
 1038 
 1039                 if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
 1040                         break;
 1041                 }
 1042         }
 1043 
 1044         return error;
 1045 }
 1046 
 1047 static void __noinline
 1048 ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
 1049 {
 1050         int error, npages;
 1051         struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
 1052 
 1053         flags |= UBC_WRITE;
 1054 
 1055         error = 0;
 1056         while (todo > 0) {
 1057                 vsize_t bytelen = todo;
 1058 
 1059                 error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
 1060                     flags, pgs, &npages);
 1061                 if (error != 0) {
 1062                         /* can't do anything, failed to get the pages */
 1063                         break;
 1064                 }
 1065 
 1066                 error = uvm_direct_process(pgs, npages, off, bytelen,
 1067                     ubc_zerorange_process, NULL);
 1068 
 1069                 ubc_direct_release(uobj, flags, pgs, npages);
 1070 
 1071                 off += bytelen;
 1072                 todo -= bytelen;
 1073         }
 1074 }
 1075 
 1076 #endif /* UBC_USE_PMAP_DIRECT */
 1077 
 1078 /*
 1079  * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 1080  */
 1081 
 1082 void
 1083 ubc_purge(struct uvm_object *uobj)
 1084 {
 1085         struct ubc_map *umap;
 1086         vaddr_t va;
 1087 
 1088         KASSERT(uobj->uo_npages == 0);
 1089 
 1090         /*
 1091          * Safe to check without lock held, as ubc_alloc() removes
 1092          * the mapping and list entry in the correct order.
 1093          */
 1094         if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
 1095                 return;
 1096         }
 1097         rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
 1098         while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
 1099                 KASSERT(umap->refcount == 0);
 1100                 for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
 1101                         KASSERT(!pmap_extract(pmap_kernel(),
 1102                             va + UBC_UMAP_ADDR(umap), NULL));
 1103                 }
 1104                 LIST_REMOVE(umap, list);
 1105                 LIST_REMOVE(umap, hash);
 1106                 umap->flags &= ~UMAP_MAPPING_CACHED;
 1107                 umap->uobj = NULL;
 1108         }
 1109         rw_exit(ubc_object.uobj.vmobjlock);
 1110 }
 1111 
 1112 static int
 1113 ubchash_stats(struct hashstat_sysctl *hs, bool fill)
 1114 {
 1115         struct ubc_map *umap;
 1116         uint64_t chain;
 1117 
 1118         strlcpy(hs->hash_name, "ubchash", sizeof(hs->hash_name));
 1119         strlcpy(hs->hash_desc, "ubc object hash", sizeof(hs->hash_desc));
 1120         if (!fill)
 1121                 return 0;
 1122 
 1123         hs->hash_size = ubc_object.hashmask + 1;
 1124 
 1125         for (size_t i = 0; i < hs->hash_size; i++) {
 1126                 chain = 0;
 1127                 rw_enter(ubc_object.uobj.vmobjlock, RW_READER);
 1128                 LIST_FOREACH(umap, &ubc_object.hash[i], hash) {
 1129                         chain++;
 1130                 }
 1131                 rw_exit(ubc_object.uobj.vmobjlock);
 1132                 if (chain > 0) {
 1133                         hs->hash_used++;
 1134                         hs->hash_items += chain;
 1135                         if (chain > hs->hash_maxchain)
 1136                                 hs->hash_maxchain = chain;
 1137                 }
 1138                 preempt_point();
 1139         }
 1140 
 1141         return 0;
 1142 }
