FreeBSD/Linux Kernel Cross Reference
sys/bsd/kern/ubc_subr.c


/*
 * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *      File:   ubc_subr.c
 *      Author: Umesh Vaishampayan [umeshv@apple.com]
 *              05-Aug-1999     umeshv  Created.
 *
 *      Functions related to the Unified Buffer Cache.
 *
 * Callers of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/ubc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ucred.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

struct zone     *ubc_info_zone;

/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_lock(&vp->v_interlock);
}

/* unlock */
static __inline__ void
ubc_unlock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_unlock(&vp->v_interlock);
}

/*
 * Serialize the requests to the VM
 * Returns:
 *              0       -       Failure
 *              1       -       Successful in acquiring the lock
 *              2       -       Successful in acquiring the lock recursively;
 *                              do not call ubc_unbusy()
 *                              [This is strange, but saves 4 bytes in struct ubc_info]
 */
static int
ubc_busy(struct vnode *vp)
{
        register struct ubc_info        *uip;

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        while (ISSET(uip->ui_flags, UI_BUSY)) {

                if (uip->ui_owner == (void *)current_act())
                        return (2);

                SET(uip->ui_flags, UI_WANTED);
                (void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);

                if (!UBCINFOEXISTS(vp))
                        return (0);
        }
        uip->ui_owner = (void *)current_act();

        SET(uip->ui_flags, UI_BUSY);

        return (1);
}

static void
ubc_unbusy(struct vnode *vp)
{
        register struct ubc_info        *uip;

        if (!UBCINFOEXISTS(vp)) {
                wakeup((caddr_t)&vp->v_ubcinfo);
                return;
        }
        uip = vp->v_ubcinfo;
        CLR(uip->ui_flags, UI_BUSY);
        uip->ui_owner = (void *)NULL;

        if (ISSET(uip->ui_flags, UI_WANTED)) {
                CLR(uip->ui_flags, UI_WANTED);
                wakeup((caddr_t)&vp->v_ubcinfo);
        }
}

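/*
 * Usage sketch (annotation, not in the original file): every caller in
 * this file follows the same pattern, unbusying only when ubc_busy()
 * returned 1; a return of 2 means this thread already holds the busy
 * lock, so the outer frame will release it:
 *
 *      int recursed;
 *
 *      if ((recursed = ubc_busy(vp)) == 0)
 *              return (0);
 *      ... work serialized against the VM ...
 *      if (recursed == 1)
 *              ubc_unbusy(vp);
 */
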
/*
 *      Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
        int     i;

        i = (vm_size_t) sizeof (struct ubc_info);
        /* XXX  the number of elements should be tied in to maxvnodes */
        ubc_info_zone = zinit(i, 10000*i, 8192, "ubc_info zone");
        return;
}

/*
 *      Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
        register struct ubc_info        *uip;
        void *  pager;
        struct vattr    vattr;
        struct proc *p = current_proc();
        int error = 0;
        kern_return_t kret;
        memory_object_control_t control;

        if (!UBCISVALID(vp))
                return (EINVAL);

        ubc_lock(vp);
        if (ISSET(vp->v_flag,  VUINIT)) {
                /*
                 * another thread is already doing this;
                 * wait till it is done
                 */
                while (ISSET(vp->v_flag,  VUINIT)) {
                        SET(vp->v_flag, VUWANT); /* XXX overloaded! */
                        ubc_unlock(vp);
                        (void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
                        ubc_lock(vp);
                }
                ubc_unlock(vp);
                return (0);
        } else {
                SET(vp->v_flag, VUINIT);
        }

        uip = vp->v_ubcinfo;
        if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
                ubc_unlock(vp);
                uip = (struct ubc_info *) zalloc(ubc_info_zone);
                uip->ui_pager = MEMORY_OBJECT_NULL;
                uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
                uip->ui_flags = UI_INITED;
                uip->ui_vnode = vp;
                uip->ui_ucred = NOCRED;
                uip->ui_refcount = 1;
                uip->ui_size = 0;
                uip->ui_mapped = 0;
                uip->ui_owner = (void *)NULL;
                ubc_lock(vp);
        }
#if DIAGNOSTIC
        else
                Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

        assert(uip->ui_flags != UI_NONE);
        assert(uip->ui_vnode == vp);

#if 0
        if(ISSET(uip->ui_flags, UI_HASPAGER))
                goto done;
#endif /* 0 */

        /* now set this ubc_info in the vnode */
        vp->v_ubcinfo = uip;
        SET(uip->ui_flags, UI_HASPAGER);
        ubc_unlock(vp);
        pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
        assert(pager);
        ubc_setpager(vp, pager);

        /*
         * Note: We cannot use VOP_GETATTR() to get an accurate
         * value of ui_size, thanks to NFS.
         * nfs_getattr() can call vinvalbuf(), and at this point the
         * ubc_info is not set up to deal with that.
         * So use a bogus size.
         */

        /*
         * Create a vnode - vm_object association.
         * memory_object_create_named() creates a "named" reference on the
         * memory object; we hold this reference as long as the vnode is
         * "alive."  Since memory_object_create_named() took its own reference
         * on the vnode pager we passed it, we can drop the reference
         * vnode_pager_setup() returned here.
         */
        kret = memory_object_create_named(pager,
                (memory_object_size_t)uip->ui_size, &control);
        vnode_pager_deallocate(pager);
        if (kret != KERN_SUCCESS)
                panic("ubc_info_init: memory_object_create_named returned %d", kret);

        assert(control);
        uip->ui_control = control;      /* cache the value of the mo control */
        SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
        /* create a pager reference on the vnode */
        error = vnode_pager_vget(vp);
        if (error)
                panic("ubc_info_init: vnode_pager_vget error = %d", error);

        /* initialize the size */
        error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

        ubc_lock(vp);
        uip->ui_size = (error ? 0 : vattr.va_size);

done:
        CLR(vp->v_flag, VUINIT);
        if (ISSET(vp->v_flag, VUWANT)) {
                CLR(vp->v_flag, VUWANT);
                ubc_unlock(vp);
                wakeup((caddr_t)vp);
        } else
                ubc_unlock(vp);

        return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
        struct ucred *credp;

        credp = uip->ui_ucred;
        if (credp != NOCRED) {
                uip->ui_ucred = NOCRED;
                crfree(credp);
        }

        if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
                memory_object_control_deallocate(uip->ui_control);

        zfree(ubc_info_zone, (vm_offset_t)uip);
        return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{

        assert(uip->ui_refcount > 0);

        if (uip->ui_refcount-- == 1) {
                struct vnode *vp;

                vp = uip->ui_vnode;
                if (ISSET(uip->ui_flags, UI_WANTED)) {
                        CLR(uip->ui_flags, UI_WANTED);
                        wakeup((caddr_t)&vp->v_ubcinfo);
                }

                ubc_info_free(uip);
        }
}

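/*
 * Reference-count sketch (annotation, not in the original file): a
 * ubc_info is created with ui_refcount == 1 in ubc_info_init().  Holders
 * such as ubc_hold() and ubc_getobject(..., UBC_HOLDOBJECT) bump the
 * count under ubc_lock(), and each bump is balanced by ubc_rele() or
 * ubc_info_deallocate(); only the drop of the final reference frees the
 * structure:
 *
 *      ubc_lock(vp);
 *      uip->ui_refcount++;
 *      ubc_unlock(vp);
 *      ...
 *      ubc_info_deallocate(uip);
 */
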
/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
        off_t osize;    /* ui_size before change */
        off_t lastpg, olastpgend, lastoff;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (nsize < (off_t)0)
                return (0);

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;
        osize = uip->ui_size;   /* call ubc_getsize() ??? */
        /* Update the size before flushing the VM */
        uip->ui_size = nsize;

        if (nsize >= osize)     /* Nothing more to do */
                return (1);             /* return success */

        /*
         * When the file shrinks, invalidate the pages beyond the
         * new size. Also get rid of garbage beyond nsize on the
         * last page. The ui_size already has the nsize. This
         * ensures that the pageout would not write beyond the new
         * end of the file.
         */

        lastpg = trunc_page_64(nsize);
        olastpgend = round_page_64(osize);
        control = uip->ui_control;
        assert(control);
        lastoff = (nsize & PAGE_MASK_64);

        /*
         * If the new length is a multiple of the page size, we need not
         * flush the last page; invalidating is sufficient.
         */
        if (!lastoff) {
                /* invalidate last page and old contents beyond nsize */
                kret = memory_object_lock_request(control,
                            (memory_object_offset_t)lastpg,
                            (memory_object_size_t)(olastpgend - lastpg),
                            MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                            VM_PROT_NO_CHANGE);
                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

                return ((kret == KERN_SUCCESS) ? 1 : 0);
        }

        /* flush the last page */
        kret = memory_object_lock_request(control,
                                (memory_object_offset_t)lastpg,
                                PAGE_SIZE_64,
                                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                                VM_PROT_NO_CHANGE);

        if (kret == KERN_SUCCESS) {
                /* invalidate last page and old contents beyond nsize */
                kret = memory_object_lock_request(control,
                                        (memory_object_offset_t)lastpg,
                                        (memory_object_size_t)(olastpgend - lastpg),
                                        MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                                        VM_PROT_NO_CHANGE);
                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
        } else
                printf("ubc_setsize: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

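/*
 * Worked example (annotation, not in the original file), assuming a
 * 4 KB page size: shrinking a file from osize 10000 to nsize 5000 gives
 * lastpg = trunc_page_64(5000) = 4096, olastpgend = round_page_64(10000)
 * = 12288, and lastoff = 5000 & 4095 = 904.  Since lastoff is nonzero,
 * the dirty tail page [4096, 8192) is pushed to disk first, and then the
 * range [4096, 12288) is invalidated so stale data past the new EOF can
 * never be paged out.
 */
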
/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
        return (vp->v_ubcinfo->ui_size);
}

/*
 * The caller indicates that the object corresponding to the vnode
 * cannot be cached in the object cache. Make it so.
 * returns 1 on success, 0 on failure
 */
int
ubc_uncache(struct vnode *vp)
{
        kern_return_t kret;
        struct ubc_info *uip;
        int    recursed;
        memory_object_control_t control;
        memory_object_perf_info_data_t   perf;

        if (!UBCINFOEXISTS(vp))
                return (0);

        if ((recursed = ubc_busy(vp)) == 0)
                return (0);

        uip = vp->v_ubcinfo;

        assert(uip != UBC_INFO_NULL);

        /*
         * AGE it so that vfree() can make sure that it
         * would get recycled soon after the last reference is gone.
         * This will ensure that .nfs turds would not linger
         */
        vagevp(vp);

        /* set the "do not cache" bit */
        SET(uip->ui_flags, UI_DONTCACHE);

        control = uip->ui_control;
        assert(control);

        perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
        perf.may_cache = FALSE;
        kret = memory_object_change_attributes(control,
                                MEMORY_OBJECT_PERFORMANCE_INFO,
                                (memory_object_info_t) &perf,
                                MEMORY_OBJECT_PERF_INFO_COUNT);

        if (kret != KERN_SUCCESS) {
                printf("ubc_uncache: memory_object_change_attributes "
                        "kret = %d\n", kret);
                if (recursed == 1)
                        ubc_unbusy(vp);
                return (0);
        }

        ubc_release_named(vp);

        if (recursed == 1)
                ubc_unbusy(vp);
        return (1);
}

/*
 * call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
        struct proc *p = current_proc();
        struct vnode *vp, *nvp;
        int ret = 1;

loop:
        simple_lock(&mntvnode_slock);
        for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
                if (vp->v_mount != mp) {
                        simple_unlock(&mntvnode_slock);
                        goto loop;
                }
                nvp = vp->v_mntvnodes.le_next;
                simple_unlock(&mntvnode_slock);
                if (UBCINFOEXISTS(vp)) {

                        /*
                         * Must get a valid reference on the vnode
                         * before calling UBC functions
                         */
                        if (vget(vp, 0, p)) {
                                ret = 0;
                                simple_lock(&mntvnode_slock);
                                continue; /* move on to the next vnode */
                        }
                        ret &= ubc_clean(vp, 0); /* do not invalidate */
                        ret &= ubc_uncache(vp);
                        vrele(vp);
                }
                simple_lock(&mntvnode_slock);
        }
        simple_unlock(&mntvnode_slock);
        return (ret);
}

/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void
ubc_unmountall()
{
        struct mount *mp, *nmp;

        /*
         * Since this only runs when rebooting, it is not interlocked.
         */
        for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
                nmp = mp->mnt_list.cqe_prev;
                (void) ubc_umount(mp);
        }
}

/* Get the credentials */
struct ucred *
ubc_getcred(struct vnode *vp)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (NOCRED);

        return (uip->ui_ucred);
}

/*
 * Set the credentials;
 * existing credentials are not changed.
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
        struct ubc_info *uip;
        struct ucred *credp;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        credp = uip->ui_ucred;
        if (credp == NOCRED) {
                crhold(p->p_ucred);
                uip->ui_ucred = p->p_ucred;
        }

        return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        return (uip->ui_pager);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object may not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
        struct ubc_info *uip;
        int    recursed;
        memory_object_control_t control;

        if (UBCINVALID(vp))
                return (0);

        if (flags & UBC_FOR_PAGEOUT)
                return(vp->v_ubcinfo->ui_control);

        if ((recursed = ubc_busy(vp)) == 0)
                return (0);

        uip = vp->v_ubcinfo;
        control = uip->ui_control;

        if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

                /*
                 * Take a temporary reference on the ubc info so that it won't go
                 * away during our recovery attempt.
                 */
                ubc_lock(vp);
                uip->ui_refcount++;
                ubc_unlock(vp);
                if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
                        SET(uip->ui_flags, UI_HASOBJREF);
                } else {
                        control = MEMORY_OBJECT_CONTROL_NULL;
                }
                if (recursed == 1)
                        ubc_unbusy(vp);
                ubc_info_deallocate(uip);

        } else {
                if (recursed == 1)
                        ubc_unbusy(vp);
        }

        return (control);
}

/* Set the pager */
int
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        uip->ui_pager = pager;
        return (1);
}

int
ubc_setflags(struct vnode * vp, int  flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        SET(uip->ui_flags, flags);

        return (1);
}

int
ubc_clearflags(struct vnode * vp, int  flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        CLR(uip->ui_flags, flags);

        return (1);
}


int
ubc_issetflags(struct vnode * vp, int  flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        return (ISSET(uip->ui_flags, flags));
}

off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
        off_t file_offset;
        int error;

        if (UBCINVALID(vp))
                return ((off_t)-1);

        error = VOP_BLKTOOFF(vp, blkno, &file_offset);
        if (error)
                file_offset = -1;

        return (file_offset);
}

daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
        daddr_t blkno;
        int error = 0;

        if (UBCINVALID(vp))
                return ((daddr_t)-1);

        error = VOP_OFFTOBLK(vp, offset, &blkno);
        if (error)
                blkno = -1;

        return (blkno);
}

/*
 * Cause the file data in VM to be pushed out to the storage;
 * it also causes all currently valid pages to be released.
 * returns 1 on success, 0 on failure
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
        off_t size;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;
        int flags = 0;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        /*
         * if invalidate was requested, write dirty data and then discard
         * the resident pages
         */
        if (invalidate)
                flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

        uip = vp->v_ubcinfo;
        size = uip->ui_size;    /* call ubc_getsize() ??? */

        control = uip->ui_control;
        assert(control);

        cluster_release(vp);
        vp->v_clen = 0;

        /* Write the dirty data in the file and discard cached pages */
        kret = memory_object_lock_request(control,
                                (memory_object_offset_t)0,
                                (memory_object_size_t)round_page_64(size),
                                MEMORY_OBJECT_RETURN_ALL, flags,
                                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_clean: clean failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM to be pushed out to the storage;
 * currently valid pages are NOT invalidated.
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty(struct vnode *vp)
{
        off_t size;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;
        size = uip->ui_size;    /* call ubc_getsize() ??? */

        control = uip->ui_control;
        assert(control);

        vp->v_flag &= ~VHASDIRTY;
        vp->v_clen = 0;

        /* Write the dirty data in the file; cached pages stay resident */
        kret = memory_object_lock_request(control,
                                (memory_object_offset_t)0,
                                (memory_object_size_t)round_page_64(size),
                                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in the given range to be pushed out to the
 * storage; currently valid pages are NOT invalidated.
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        control = uip->ui_control;
        assert(control);

        /* Write any dirty pages in the requested range of the file: */
        kret = memory_object_lock_request(control,
                                (memory_object_offset_t)offset,
                                (memory_object_size_t)round_page_64(size),
                                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Make sure the vm object does not vanish.
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
int
ubc_hold(struct vnode *vp)
{
        struct ubc_info *uip;
        int    recursed;

retry:

        if (UBCINVALID(vp))
                return (0);

        ubc_lock(vp);
        if (ISSET(vp->v_flag,  VUINIT)) {
                /*
                 * another thread is not done initializing this
                 * yet; wait till it's done and try again
                 */
                while (ISSET(vp->v_flag,  VUINIT)) {
                        SET(vp->v_flag, VUWANT); /* XXX overloaded! */
                        ubc_unlock(vp);
                        (void) tsleep((caddr_t)vp, PINOD, "ubchold", 0);
                        ubc_lock(vp);
                }
                ubc_unlock(vp);
                goto retry;
        }
        ubc_unlock(vp);

        if ((recursed = ubc_busy(vp)) == 0) {
                /* must be invalid or dying vnode */
                assert(UBCINVALID(vp) ||
                        ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
                return (0);
        }

        uip = vp->v_ubcinfo;
        assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

        ubc_lock(vp);
        uip->ui_refcount++;
        ubc_unlock(vp);

        if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
                if (memory_object_recover_named(uip->ui_control, TRUE)
                        != KERN_SUCCESS) {
                        if (recursed == 1)
                                ubc_unbusy(vp);
                        ubc_info_deallocate(uip);
                        return (0);
                }
                SET(uip->ui_flags, UI_HASOBJREF);
        }
        if (recursed == 1)
                ubc_unbusy(vp);

        assert(uip->ui_refcount > 0);

        return (1);
}

/*
 * Drop the holdcount.
 * Release the reference on the vm object if this is an "uncached"
 * ubc_info.
 */
void
ubc_rele(struct vnode *vp)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp)) {
                /* nothing more to do for a dying vnode */
                if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
                        return;
                panic("ubc_rele: can not");
        }

        uip = vp->v_ubcinfo;

        if (uip->ui_refcount == 1)
                panic("ubc_rele: ui_refcount");

        --uip->ui_refcount;

        if ((uip->ui_refcount == 1)
                && ISSET(uip->ui_flags, UI_DONTCACHE))
                (void) ubc_release_named(vp);

        return;
}

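/*
 * Pairing sketch (annotation, not in the original file): callers must
 * balance ubc_hold() and ubc_rele() using the hold's return value, e.g.:
 *
 *      int didhold;
 *
 *      didhold = ubc_hold(vp);
 *      ... operate on the vm object backing vp ...
 *      if (didhold)
 *              ubc_rele(vp);
 */
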
/*
 * The vnode is mapped explicitly; mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp))
                return;

        ubc_lock(vp);
        uip = vp->v_ubcinfo;

        SET(uip->ui_flags, UI_WASMAPPED);
        uip->ui_mapped = 1;
        ubc_unlock(vp);

        return;
}

/*
 * Release the memory object reference on the vnode,
 * only if it is not in use.
 * Return 1 if the reference was released, 0 otherwise.
 */
int
ubc_release_named(struct vnode *vp)
{
        struct ubc_info *uip;
        int    recursed;
        memory_object_control_t control;
        kern_return_t kret = KERN_FAILURE;

        if (UBCINVALID(vp))
                return (0);

        if ((recursed = ubc_busy(vp)) == 0)
                return (0);
        uip = vp->v_ubcinfo;

        /* cannot release held or mapped vnodes */
        if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
                (uip->ui_refcount == 1) && !uip->ui_mapped) {
                control = uip->ui_control;
                assert(control);

                // XXXdbg
                if (vp->v_flag & VDELETED) {
                    ubc_setsize(vp, (off_t)0);
                }

                CLR(uip->ui_flags, UI_HASOBJREF);
                kret = memory_object_release_name(control,
                                MEMORY_OBJECT_RESPECT_CACHE);
        }

        if (recursed == 1)
                ubc_unbusy(vp);
        return ((kret != KERN_SUCCESS) ? 0 : 1);
}

/*
 * This function used to be called by extensions directly.  Some may
 * still exist with this behavior.  In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode.  We don't
 * need anything explicit - so just stub this out until those callers
 * get cleaned up.
 */
int
ubc_release(
        struct vnode    *vp)
{
        return 0;
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(
        struct vnode    *vp)
{
        memory_object_control_t control;
        struct ubc_info *uip;
        kern_return_t kret;

        /*
         * We may already have had the object terminated
         * and the ubcinfo released as a side effect of
         * some earlier processing.  If so, pretend we did
         * it, because it probably was a result of our
         * efforts.
         */
        if (!UBCINFOEXISTS(vp))
                return (1);

        uip = vp->v_ubcinfo;

        /* cannot destroy held vnodes */
        if (uip->ui_refcount > 1)
                return (0);

        /*
         * Terminate the memory object.
         * memory_object_destroy() will result in
         * vnode_pager_no_senders().
         * That will release the pager reference
         * and the vnode will move to the free list.
         */
        control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {

                if (ISSET(vp->v_flag, VTERMINATE))
                        panic("ubc_destroy_named: already terminating");
                SET(vp->v_flag, VTERMINATE);

                kret = memory_object_destroy(control, 0);
                if (kret != KERN_SUCCESS)
                        return (0);

                /*
                 * memory_object_destroy() is asynchronous
                 * with respect to vnode_pager_no_senders().
                 * wait for vnode_pager_no_senders() to clear
                 * VTERMINATE
                 */
                while (ISSET(vp->v_flag, VTERMINATE)) {
                        SET(vp->v_flag, VTERMWANT);
                        (void)tsleep((caddr_t)&vp->v_ubcinfo,
                                                 PINOD, "ubc_destroy_named", 0);
                }
        }
        return (1);
}


/*
 * Invalidate a range in the memory object that backs this
 * vnode. The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;
        off_t toff;
        size_t tsize;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        toff = trunc_page_64(offset);
        tsize = (size_t)(round_page_64(offset+size) - toff);
        uip = vp->v_ubcinfo;
        control = uip->ui_control;
        assert(control);

        /* invalidate pages in the range requested */
        kret = memory_object_lock_request(control,
                                (memory_object_offset_t)toff,
                                (memory_object_size_t)tsize,
                                MEMORY_OBJECT_RETURN_NONE,
                                (MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
                                VM_PROT_NO_CHANGE);
        if (kret != KERN_SUCCESS)
                printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

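/*
 * Worked example (annotation, not in the original file), assuming a
 * 4 KB page size: ubc_invalidate(vp, 5000, 3000) computes
 * toff = trunc_page_64(5000) = 4096 and
 * tsize = round_page_64(8000) - 4096 = 8192 - 4096 = 4096,
 * so the single page [4096, 8192) covering the requested byte range
 * [5000, 8000) is flushed and invalidated.
 */
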
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
        if (!UBCINFOEXISTS(vp))
                return (0);

        if (busycount == 0) {
                printf("ubc_isinuse: called without a valid reference"
                    ": v_tag = %d\n", vp->v_tag);
                vprint("ubc_isinuse", vp);
                return (0);
        }

        if (vp->v_usecount > busycount+1)
                return (1);

        if ((vp->v_usecount == busycount+1)
                && (vp->v_ubcinfo->ui_mapped == 1))
                return (1);
        else
                return (0);
}

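/*
 * Arithmetic note (annotation, not in the original file): busycount is
 * the number of references the caller itself holds.  The caller's
 * references plus the UBC's own reference put the floor at
 * v_usecount == busycount + 1, so the file is reported in use either
 * when some third party holds an additional reference
 * (v_usecount > busycount + 1) or when it sits exactly at that floor
 * but is currently mapped.
 */
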
/*
 * The backdoor routine to clear ui_mapped.
 * MUST only be called by the VM.
 *
 * Note that this routine is not called under funnel. There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
        struct ubc_info *uip;
        boolean_t       funnel_state;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp))
                return;

        ubc_lock(vp);
        uip = vp->v_ubcinfo;
        uip->ui_mapped = 0;
        if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
                ubc_unlock(vp);
                return;
        }
        ubc_unlock(vp);

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        (void) ubc_release_named(vp);
        (void) thread_funnel_set(kernel_flock, funnel_state);
}

kern_return_t
ubc_page_op(
        struct vnode    *vp,
        off_t           f_offset,
        int             ops,
        ppnum_t         *phys_entryp,
        int             *flagsp)
{
        memory_object_control_t         control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_page_op(control,
                                      (memory_object_offset_t)f_offset,
                                      ops,
                                      phys_entryp,
                                      flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
        memory_object_control_t  control,
        off_t                    f_offset,
        int                      ops,
        ppnum_t                  *phys_entryp,
        int                      *flagsp)
{
        return (memory_object_page_op(control,
                                      (memory_object_offset_t)f_offset,
                                      ops,
                                      phys_entryp,
                                      flagsp));
}

kern_return_t
ubc_range_op(
        struct vnode    *vp,
        off_t           f_offset_beg,
        off_t           f_offset_end,
        int             ops,
        int             *range)
{
        memory_object_control_t         control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_range_op(control,
                                      (memory_object_offset_t)f_offset_beg,
                                      (memory_object_offset_t)f_offset_end,
                                      ops,
                                      range));
}

kern_return_t
ubc_create_upl(
        struct vnode    *vp,
        off_t           f_offset,
        long            bufsize,
        upl_t           *uplp,
        upl_page_info_t **plp,
        int             uplflags)
{
        memory_object_control_t         control;
        int                             count;
        int                             ubcflags;
        kern_return_t                   kr;

        if (bufsize & 0xfff)
                return KERN_INVALID_ARGUMENT;

        if (uplflags & UPL_FOR_PAGEOUT) {
                uplflags &= ~UPL_FOR_PAGEOUT;
                ubcflags  =  UBC_FOR_PAGEOUT;
        } else
                ubcflags = UBC_FLAGS_NONE;

        control = ubc_getobject(vp, ubcflags);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        if (uplflags & UPL_WILL_BE_DUMPED) {
                uplflags &= ~UPL_WILL_BE_DUMPED;
                uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
        } else
                uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
        count = 0;
        kr = memory_object_upl_request(control, f_offset, bufsize,
                                       uplp, NULL, &count, uplflags);
        if (plp != NULL)
                *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
        return kr;
}

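/*
 * Lifecycle sketch (annotation, not in the original file): a typical
 * consumer of the UPL interface creates a page list over a page-aligned
 * range, optionally maps it into the kernel, and then commits or aborts
 * it exactly once.  The 0 passed for uplflags below stands in for "no
 * special request flags"; real callers pass UPL_* request flags:
 *
 *      upl_t upl;
 *      upl_page_info_t *pl;
 *      vm_offset_t addr;
 *
 *      if (ubc_create_upl(vp, f_offset, bufsize, &upl, &pl, 0)
 *              != KERN_SUCCESS)
 *              return (EIO);
 *      if (ubc_upl_map(upl, &addr) == KERN_SUCCESS) {
 *              ... copy to or from addr ...
 *              (void) ubc_upl_unmap(upl);
 *      }
 *      (void) ubc_upl_commit_range(upl, 0, bufsize,
 *              UPL_COMMIT_FREE_ON_EMPTY);
 */
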
kern_return_t
ubc_upl_map(
        upl_t           upl,
        vm_offset_t     *dst_addr)
{
        return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
        upl_t   upl)
{
        return (vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
        upl_t                   upl)
{
        upl_page_info_t *pl;
        kern_return_t   kr;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
        kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
        upl_deallocate(upl);
        return kr;
}


kern_return_t
ubc_upl_commit_range(
        upl_t                   upl,
        vm_offset_t             offset,
        vm_size_t               size,
        int                     flags)
{
        upl_page_info_t *pl;
        boolean_t               empty;
        kern_return_t   kr;

        if (flags & UPL_COMMIT_FREE_ON_EMPTY)
                flags |= UPL_COMMIT_NOTIFY_EMPTY;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

        kr = upl_commit_range(upl, offset, size, flags,
                              pl, MAX_UPL_TRANSFER, &empty);

        if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}

kern_return_t
ubc_upl_abort_range(
        upl_t                   upl,
        vm_offset_t             offset,
        vm_size_t               size,
        int                     abort_flags)
{
        kern_return_t   kr;
        boolean_t               empty = FALSE;

        if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
                abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

        kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

        if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}

kern_return_t
ubc_upl_abort(
        upl_t                   upl,
        int                     abort_type)
{
        kern_return_t   kr;

        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
        return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
        upl_t                   upl)
{
        return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
