FreeBSD/Linux Kernel Cross Reference
sys/bsd/vm/vm_unix.c


/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>
#include <sys/stat.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

extern zone_t lsf_zone;

int
useracc(addr, len, prot)
        caddr_t addr;
        u_int   len;
        int     prot;
{
        return (vm_map_check_protection(
                        current_map(),
                        trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
                        prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
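
/*
 * Usage sketch (illustrative only, not part of this file): useracc() is
 * the classic BSD check made before touching a user buffer.  B_READ asks
 * whether the range may be read; anything else checks writability.  Note
 * that it only snapshots the map's current protections -- the definitive
 * check is still the fault taken by copyin()/copyout() themselves.
 */
#if 0
        if (!useracc(uaddr, len, B_READ))       /* may we read from uaddr? */
                return (EFAULT);
        error = copyin(uaddr, kbuf, len);       /* still handles faults */
#endif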

int
vslock(addr, len)
        caddr_t addr;
        int     len;
{
        kern_return_t kret;

        kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
                        round_page_32((unsigned int)(addr+len)),
                        VM_PROT_READ | VM_PROT_WRITE, FALSE);

        switch (kret) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}

int
vsunlock(addr, len, dirtied)
        caddr_t addr;
        int     len;
        int     dirtied;
{
        pmap_t          pmap;
#if FIXME  /* [ */
        vm_page_t       pg;
#endif  /* FIXME ] */
        vm_offset_t     vaddr, paddr;
        kern_return_t   kret;

#if FIXME  /* [ */
        if (dirtied) {
                pmap = get_task_pmap(current_task());
                for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
                                vaddr += PAGE_SIZE) {
                        paddr = pmap_extract(pmap, vaddr);
                        pg = PHYS_TO_VM_PAGE(paddr);
                        vm_page_set_modified(pg);
                }
        }
#endif  /* FIXME ] */
#ifdef  lint
        dirtied++;
#endif  /* lint */
        kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
                                round_page_32((unsigned int)(addr+len)), FALSE);
        switch (kret) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
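
/*
 * Usage sketch (illustrative): vslock()/vsunlock() bracket raw I/O done
 * directly into user memory, wiring the pages so they cannot be paged
 * out while the transfer is in flight.  The dirtied flag tells
 * vsunlock() whether the pages were written to -- though, per the FIXME
 * block above, the modified-bit propagation is currently compiled out.
 */
#if 0
        error = vslock(uaddr, len);
        if (error)
                return (error);
        error = do_device_transfer(uaddr, len); /* hypothetical helper */
        vsunlock(uaddr, len, 1);                /* 1 => pages were dirtied */
#endif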

#if     defined(sun) || BALANCE || defined(m88k)
#else   /* defined(sun) || BALANCE || defined(m88k) */
int
subyte(addr, byte)
        void * addr;
        int byte;
{
        char character;

        character = (char)byte;
        return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
        void * addr;
        int byte;
{
        char character;

        character = (char)byte;
        return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
        void * addr;
{
        unsigned char byte;

        if (copyin(addr, (void *) &byte, sizeof(char)))
                return(-1);
        return(byte);
}

int fuibyte(addr)
        void * addr;
{
        unsigned char byte;

        if (copyin(addr, (void *) &(byte), sizeof(char)))
                return(-1);
        return(byte);
}

int
suword(addr, word)
        void * addr;
        long word;
{
        return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
        void * addr;
{
        long word;

        if (copyin(addr, (void *) &word, sizeof(int)))
                return(-1);
        return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(addr, word)
        void * addr;
        long word;
{
        return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
        void * addr;
{
        long word;

        if (copyin(addr, (void *) &word, sizeof(int)))
                return(-1);
        return(word);
}
#endif  /* defined(sun) || BALANCE || defined(m88k) */
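
/*
 * Usage sketch (illustrative): the fetch/store primitives return -1 on
 * a fault.  For fubyte() the unsigned char fetch keeps legitimate data
 * bytes in 0..255, so -1 stays distinct from 0xff.  Also note that
 * suword()/fuword() move sizeof(int) bytes even though they traffic in
 * longs; the two sizes coincide on the 32-bit platforms this file
 * targets.
 */
#if 0
        int c = fubyte(uaddr);
        if (c == -1)
                return (EFAULT);        /* fault, not a data byte */
        if (suword(uaddr, 0) != 0)
                return (EFAULT);
#endif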

int
swapon()
{
        return(EOPNOTSUPP);
}


kern_return_t
pid_for_task(t, x)
        mach_port_t     t;
        int     *x;
{
        struct proc * p;
        task_t          t1;
        extern task_t port_name_to_task(mach_port_t t);
        int     pid = -1;
        kern_return_t   err = KERN_SUCCESS;
        boolean_t funnel_state;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        t1 = port_name_to_task(t);

        if (t1 == TASK_NULL) {
                err = KERN_FAILURE;
                goto pftout;
        } else {
                p = get_bsdtask_info(t1);
                if (p) {
                        pid = p->p_pid;
                        err = KERN_SUCCESS;
                } else {
                        err = KERN_FAILURE;
                }
        }
        task_deallocate(t1);
pftout:
        (void) copyout((char *) &pid, (char *) x, sizeof(*x));
        thread_funnel_set(kernel_flock, funnel_state);
        return(err);
}
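
/*
 * Caller's view (sketch): pid_for_task() is exported to user space as a
 * Mach trap, so a process can map any task port it holds -- including
 * its own -- back to a BSD process ID.  Illustrative only.
 */
#if 0
        int pid;
        kern_return_t kr = pid_for_task(mach_task_self(), &pid);
        if (kr == KERN_SUCCESS)
                printf("my pid is %d\n", pid);
#endif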

/*
 *      Routine:        task_for_pid
 *      Purpose:
 *              Get the task port for another "process", named by its
 *              process ID on the same host as "target_task".
 *
 *              Only permitted to privileged processes, or processes
 *              with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
        mach_port_t     target_tport;
        int             pid;
        mach_port_t     *t;
{
        struct proc     *p;
        struct proc     *p1;
        task_t          t1;
        mach_port_t     tret;
        extern task_t port_name_to_task(mach_port_t tp);
        void * sright;
        int error = 0;
        boolean_t funnel_state;

        t1 = port_name_to_task(target_tport);
        if (t1 == TASK_NULL) {
                (void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
                return(KERN_FAILURE);
        }

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
        p1 = get_bsdtask_info(t1);
        if (
                ((p = pfind(pid)) != (struct proc *) 0)
                && (p1 != (struct proc *) 0)
                && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
                        ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
                || !(suser(p1->p_ucred, &p1->p_acflag)))
                && (p->p_stat != SZOMB)
                ) {
                        if (p->task != TASK_NULL) {
                                if (!task_reference_try(p->task)) {
                                        mutex_pause(); /* temp loss of funnel */
                                        goto restart;
                                }
                                sright = (void *)convert_task_to_port(p->task);
                                tret = (void *)
                                        ipc_port_copyout_send(sright,
                                           get_task_ipcspace(current_task()));
                        } else
                                tret = MACH_PORT_NULL;
                        (void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
                        task_deallocate(t1);
                        error = KERN_SUCCESS;
                        goto tfpout;
        }
        task_deallocate(t1);
        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
        error = KERN_FAILURE;
tfpout:
        thread_funnel_set(kernel_flock, funnel_state);
        return(error);
}
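
/*
 * Caller's view (sketch): the inverse mapping.  Per the check above,
 * task_for_pid() succeeds only if the caller is privileged (suser) or
 * shares both effective and real user IDs with the target.  Illustrative
 * only; on failure the returned port is MACH_PORT_NULL.
 */
#if 0
        mach_port_t task;
        kern_return_t kr = task_for_pid(mach_task_self(), target_pid, &task);
        if (kr != KERN_SUCCESS)
                return (EPERM);         /* denied or no such process */
#endif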


struct load_shared_file_args {
                char            *filename;
                caddr_t         mfa;
                u_long          mfs;
                caddr_t         *ba;
                int             map_cnt;
                sf_mapping_t    *mappings;
                int             *flags;
};

int     ws_disabled = 1;

int
load_shared_file(
        struct proc             *p,
        struct load_shared_file_args *uap,
        register int            *retval)
{
        caddr_t         mapped_file_addr=uap->mfa;
        u_long          mapped_file_size=uap->mfs;
        caddr_t         *base_address=uap->ba;
        int             map_cnt=uap->map_cnt;
        sf_mapping_t    *mappings=uap->mappings;
        char            *filename=uap->filename;
        int             *flags=uap->flags;
        struct vnode            *vp = 0;
        struct nameidata        nd, *ndp;
        char                    *filename_str;
        register int            error;
        kern_return_t           kr;

        struct vattr    vattr;
        memory_object_control_t file_control;
        sf_mapping_t    *map_list;
        caddr_t         local_base;
        int             local_flags;
        int             caller_flags;
        int             i;
        int             default_regions = 0;
        vm_size_t       dummy;
        kern_return_t   kret;

        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings      task_mapping_info;
        shared_region_mapping_t next;

        ndp = &nd;


        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
                goto lsf_bailout;
        }
        if (error = copyin(flags, &local_flags, sizeof (int))) {
                goto lsf_bailout;
        }

        if(local_flags & QUERY_IS_SYSTEM_REGION) {
                shared_region_mapping_t default_shared_region;
                vm_get_shared_region(current_task(), &shared_region);
                task_mapping_info.self = (vm_offset_t)shared_region;

                shared_region_mapping_info(shared_region,
                                &(task_mapping_info.text_region),
                                &(task_mapping_info.text_size),
                                &(task_mapping_info.data_region),
                                &(task_mapping_info.data_size),
                                &(task_mapping_info.region_mappings),
                                &(task_mapping_info.client_base),
                                &(task_mapping_info.alternate_base),
                                &(task_mapping_info.alternate_next),
                                &(task_mapping_info.fs_base),
                                &(task_mapping_info.system),
                                &(task_mapping_info.flags), &next);

                default_shared_region =
                        lookup_default_shared_region(
                                ENV_DEFAULT_ROOT,
                                task_mapping_info.system);
                if (shared_region == default_shared_region) {
                        local_flags = SYSTEM_REGION_BACKED;
                } else {
                        local_flags = 0;
                }
                shared_region_mapping_dealloc(default_shared_region);
                error = copyout(&local_flags, flags, sizeof (int));
                goto lsf_bailout;
        }
        caller_flags = local_flags;
        kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
                        (vm_size_t)(MAXPATHLEN));
        if (kret != KERN_SUCCESS) {
                error = ENOMEM;
                goto lsf_bailout;
        }
        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        if (kret != KERN_SUCCESS) {
                kmem_free(kernel_map, (vm_offset_t)filename_str,
                        (vm_size_t)(MAXPATHLEN));
                error = ENOMEM;
                goto lsf_bailout;
        }

        if (error =
                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
                goto lsf_bailout_free;
        }

        if (error = copyinstr(filename,
                        filename_str, MAXPATHLEN, (size_t *)&dummy)) {
                goto lsf_bailout_free;
        }

        /*
         * Get a vnode for the target file
         */
        NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
            filename_str, p);

        if ((error = namei(ndp))) {
                goto lsf_bailout_free;
        }

        vp = ndp->ni_vp;

        if (vp->v_type != VREG) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }

        UBCINFOCHECK("load_shared_file", vp);

        if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
                goto lsf_bailout_free_vput;
        }


        file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }

#ifdef notdef
        if(vattr.va_size != mapped_file_size) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }
#endif
        if(p->p_flag & P_NOSHLIB) {
                p->p_flag = p->p_flag & ~P_NOSHLIB;
        }

        /* Load alternate regions if the caller has requested them. */
        /* Note: the new regions are "clean slates". */
        if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
                error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                if (error) {
                        goto lsf_bailout_free_vput;
                }
        }

        vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
                        &(task_mapping_info.text_region),
                        &(task_mapping_info.text_size),
                        &(task_mapping_info.data_region),
                        &(task_mapping_info.data_size),
                        &(task_mapping_info.region_mappings),
                        &(task_mapping_info.client_base),
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next),
                        &(task_mapping_info.fs_base),
                        &(task_mapping_info.system),
                        &(task_mapping_info.flags), &next);

        {
                shared_region_mapping_t default_shared_region;
                default_shared_region =
                        lookup_default_shared_region(
                                ENV_DEFAULT_ROOT,
                                task_mapping_info.system);
                if(shared_region == default_shared_region) {
                        default_regions = 1;
                }
                shared_region_mapping_dealloc(default_shared_region);
        }
        /* If we are running on a removable file system we must not */
        /* be in a set of shared regions or the file system will not */
        /* be removable. */
        if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
                && (lsf_mapping_pool_gauge() < 75)) {
                /* We don't want to run out of shared memory */
                /* map entries by starting too many private versions */
                /* of the shared library structures */
                int     error;
                if(p->p_flag & P_NOSHLIB) {
                        error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                } else {
                        error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
                }
                if (error) {
                        goto lsf_bailout_free_vput;
                }
                local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
                vm_get_shared_region(current_task(), &shared_region);
                shared_region_mapping_info(shared_region,
                        &(task_mapping_info.text_region),
                        &(task_mapping_info.text_size),
                        &(task_mapping_info.data_region),
                        &(task_mapping_info.data_size),
                        &(task_mapping_info.region_mappings),
                        &(task_mapping_info.client_base),
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next),
                        &(task_mapping_info.fs_base),
                        &(task_mapping_info.system),
                        &(task_mapping_info.flags), &next);
        }

        /*
         * This is a work-around to allow executables which have been
         * built without knowledge of the proper shared segment to load.
         * This code has been architected as a shared region handler;
         * knowing where the regions are loaded is problematic for the
         * extension of shared regions, since it will not be easy to know
         * what region an item should go into.  The code below, however,
         * gets around a short-term problem with executables which
         * believe they are loading at zero.
         */

        {
                if (((unsigned int)local_base &
                        (~(task_mapping_info.text_size - 1))) !=
                        task_mapping_info.client_base) {
                        if(local_flags & ALTERNATE_LOAD_SITE) {
                                local_base = (caddr_t)(
                                        (unsigned int)local_base &
                                           (task_mapping_info.text_size - 1));
                                local_base = (caddr_t)((unsigned int)local_base
                                           | task_mapping_info.client_base);
                        } else {
                                error = EINVAL;
                                goto lsf_bailout_free_vput;
                        }
                }
        }
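
        /*
         * Worked example (assuming a 256 MB text region): with
         * text_size = 0x10000000 and client_base = 0x90000000, a
         * local_base of 0x00003000 fails the mask test above; under
         * ALTERNATE_LOAD_SITE it is rewritten to
         * (0x00003000 & 0x0fffffff) | 0x90000000 = 0x90003000, i.e.
         * the offset is preserved but relocated into the shared text
         * region.
         */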


        if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
                        mapped_file_size,
                        (vm_offset_t *)&local_base,
                        map_cnt, map_list, file_control,
                        &task_mapping_info, &local_flags))) {
                switch (kr) {
                        case KERN_FAILURE:
                                error = EINVAL;
                                break;
                        case KERN_INVALID_ARGUMENT:
                                error = EINVAL;
                                break;
                        case KERN_INVALID_ADDRESS:
                                error = EACCES;
                                break;
                        case KERN_PROTECTION_FAILURE:
                                /* save EAUTH for authentication in this */
                                /* routine */
                                error = EPERM;
                                break;
                        case KERN_NO_SPACE:
                                error = ENOMEM;
                                break;
                        default:
                                error = EINVAL;
                }
                if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
                        printf("load_shared_file:  Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
                        for(i=0; i<map_cnt; i++) {
                                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                                        , i, map_list[i].mapping_offset,
                                        map_list[i].size,
                                        map_list[i].file_offset,
                                        map_list[i].protection);
                        }
                }
        } else {
                if(default_regions)
                        local_flags |= SYSTEM_REGION_BACKED;
                if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
                        error = copyout(&local_base,
                                base_address, sizeof (caddr_t));
                }
        }

lsf_bailout_free_vput:
        vput(vp);

lsf_bailout_free:
        kmem_free(kernel_map, (vm_offset_t)filename_str,
                                (vm_size_t)(MAXPATHLEN));
        kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
        return error;
}
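
/*
 * Caller's view (sketch): load_shared_file() is reached via a BSD
 * syscall wrapper with the same shape as load_shared_file_args and is
 * used by the dynamic linker to map split-segment libraries.  flags is
 * in/out: QUERY_IS_SYSTEM_REGION, NEW_LOCAL_SHARED_REGIONS and
 * ALTERNATE_LOAD_SITE go in; SYSTEM_REGION_BACKED may come back, and
 * *ba returns the (possibly relocated) base address.  Illustrative
 * only -- the exact user-level wrapper is assumed, not shown here.
 */
#if 0
        caddr_t base = requested_base;
        int flags = ALTERNATE_LOAD_SITE;
        error = load_shared_file(path, file_addr, file_size,
                        &base, mapping_count, mappings, &flags);
#endif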

struct reset_shared_file_args {
                caddr_t         *ba;
                int             map_cnt;
                sf_mapping_t    *mappings;
};

int
reset_shared_file(
        struct proc             *p,
        struct reset_shared_file_args *uap,
        register int            *retval)
{
        caddr_t         *base_address=uap->ba;
        int             map_cnt=uap->map_cnt;
        sf_mapping_t    *mappings=uap->mappings;
        register int    error;
        kern_return_t   kr;

        sf_mapping_t    *map_list;
        caddr_t         local_base;
        vm_offset_t     map_address;
        int             i;
        kern_return_t   kret;

        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
                goto rsf_bailout;
        }

        if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
                                        != GLOBAL_SHARED_TEXT_SEGMENT) {
                error = EINVAL;
                goto rsf_bailout;
        }

        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        if (kret != KERN_SUCCESS) {
                error = ENOMEM;
                goto rsf_bailout;
        }

        if (error =
                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

                kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
                goto rsf_bailout;
        }
        for (i = 0; i<map_cnt; i++) {
                if((map_list[i].mapping_offset
                                & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
                        map_address = (vm_offset_t)
                                (local_base + map_list[i].mapping_offset);
                        vm_deallocate(current_map(),
                                map_address,
                                map_list[i].size);
                        vm_map(current_map(), &map_address,
                                map_list[i].size, 0, SHARED_LIB_ALIAS,
                                shared_data_region_handle,
                                ((unsigned int)local_base
                                   & SHARED_DATA_REGION_MASK) +
                                        (map_list[i].mapping_offset
                                        & SHARED_DATA_REGION_MASK),
                                TRUE, VM_PROT_READ,
                                VM_PROT_READ, VM_INHERIT_SHARE);
                }
        }

        kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
        return error;
}

struct new_system_shared_regions_args {
        int dummy;
};

int
new_system_shared_regions(
        struct proc             *p,
        struct new_system_shared_regions_args *uap,
        register int            *retval)
{
        shared_region_mapping_t regions;
        shared_region_mapping_t new_regions;

        if(!(is_suser())) {
                *retval = EINVAL;
                return EINVAL;
        }

        /* clear all of our existing defaults */
        remove_all_shared_regions();

        *retval = 0;
        return 0;
}


int
clone_system_shared_regions(shared_regions_active, base_vnode)
        int shared_regions_active;
        int base_vnode;
{
        shared_region_mapping_t new_shared_region;
        shared_region_mapping_t next;
        shared_region_mapping_t old_shared_region;
        struct shared_region_task_mappings old_info;
        struct shared_region_task_mappings new_info;

        struct proc     *p;

        vm_get_shared_region(current_task(), &old_shared_region);
        old_info.self = (vm_offset_t)old_shared_region;
        shared_region_mapping_info(old_shared_region,
                &(old_info.text_region),
                &(old_info.text_size),
                &(old_info.data_region),
                &(old_info.data_size),
                &(old_info.region_mappings),
                &(old_info.client_base),
                &(old_info.alternate_base),
                &(old_info.alternate_next),
                &(old_info.fs_base),
                &(old_info.system),
                &(old_info.flags), &next);
        if ((shared_regions_active) ||
                (base_vnode == ENV_DEFAULT_ROOT)) {
           if (shared_file_create_system_region(&new_shared_region))
                return (ENOMEM);
        } else {
           new_shared_region =
                lookup_default_shared_region(
                        base_vnode, old_info.system);
           if(new_shared_region == NULL) {
                shared_file_boot_time_init(
                        base_vnode, old_info.system);
                vm_get_shared_region(current_task(), &new_shared_region);
           } else {
                vm_set_shared_region(current_task(), new_shared_region);
           }
           if(old_shared_region)
                shared_region_mapping_dealloc(old_shared_region);
        }
        new_info.self = (vm_offset_t)new_shared_region;
        shared_region_mapping_info(new_shared_region,
                &(new_info.text_region),
                &(new_info.text_size),
                &(new_info.data_region),
                &(new_info.data_size),
                &(new_info.region_mappings),
                &(new_info.client_base),
                &(new_info.alternate_base),
                &(new_info.alternate_next),
                &(new_info.fs_base),
                &(new_info.system),
                &(new_info.flags), &next);
        if(shared_regions_active) {
           if(vm_region_clone(old_info.text_region, new_info.text_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 1");
                shared_region_mapping_dealloc(new_shared_region);
                return(EINVAL);
           }
           if (vm_region_clone(old_info.data_region, new_info.data_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 2");
                shared_region_mapping_dealloc(new_shared_region);
                return(EINVAL);
           }
           shared_region_object_chain_attach(
                                new_shared_region, old_shared_region);
        }
        if (vm_map_region_replace(current_map(), old_info.text_region,
                        new_info.text_region, old_info.client_base,
                        old_info.client_base+old_info.text_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 3");
                shared_region_mapping_dealloc(new_shared_region);
                return(EINVAL);
        }
        if(vm_map_region_replace(current_map(), old_info.data_region,
                        new_info.data_region,
                        old_info.client_base + old_info.text_size,
                        old_info.client_base
                                + old_info.text_size + old_info.data_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 4");
                shared_region_mapping_dealloc(new_shared_region);
                return(EINVAL);
        }
        vm_set_shared_region(current_task(), new_shared_region);

        /* consume the reference which wasn't accounted for in object */
        /* chain attach */
        if(!shared_regions_active)
                shared_region_mapping_dealloc(old_shared_region);

        return(0);

}
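
/*
 * Usage sketch (illustrative): the two modes of the clone.  With
 * shared_regions_active == TRUE the caller keeps the contents of the
 * current regions (vm_region_clone plus the object chain attach above);
 * with FALSE it gets a clean slate or the default region for
 * base_vnode, as load_shared_file() does for NEW_LOCAL_SHARED_REGIONS.
 */
#if 0
        /* private copy that preserves the currently mapped libraries */
        error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
        /* fresh, empty private regions */
        error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
#endif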

extern vm_map_t bsd_pageable_map;

/* Header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file.    */

struct profile_names_header {
        unsigned int    number_of_profiles;
        unsigned int    user_id;
        unsigned int    version;
        off_t           element_array;
        unsigned int    spare1;
        unsigned int    spare2;
        unsigned int    spare3;
};

struct profile_element {
        off_t           addr;
        vm_size_t       size;
        unsigned int    mod_date;
        unsigned int    inode;
        char name[12];
};
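
/*
 * On-disk layout sketch (inferred from the search code below): the
 * names file begins with a profile_names_header whose element_array
 * offset points just past the header, followed by a packed array of
 * profile_element records.  Each element identifies an app (name,
 * inode, mod_date) and locates its profile in the data file via
 * addr/size.
 *
 *   names file: [profile_names_header][element 0][element 1]...
 *   data file:  profile bytes at element[i].addr, element[i].size long
 */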

struct global_profile {
        struct vnode    *names_vp;
        struct vnode    *data_vp;
        vm_offset_t     buf_ptr;
        unsigned int    user;
        unsigned int    age;
        unsigned int    busy;
};

struct global_profile_cache {
        int                     max_ele;
        unsigned int            age;
        struct global_profile   profiles[3];
};

struct global_profile_cache global_user_profile_cache =
        {3, 0, {{NULL, NULL, 0, 0, 0, 0},
                {NULL, NULL, 0, 0, 0, 0},
                {NULL, NULL, 0, 0, 0, 0}}};

/* BSD_OPEN_PAGE_CACHE_FILES:                                 */
/* Caller provides a user id.  This id was used in            */
/* prepare_profile_database to create two unique absolute     */
/* file paths to the associated profile files.  These files   */
/* are either opened or bsd_open_page_cache_files returns an  */
/* error.  The header of the names file is then consulted.    */
/* The header and the vnodes for the names and data files are */
/* returned. */

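/*
 * Worked example (hypothetical uid): for user 501 the sprintf()s below
 * use "%x" formatting, so the two absolute paths become
 * "/var/vm/app_profile/1f5_names" and "/var/vm/app_profile/1f5_data"
 * (0x1f5 == 501).
 */
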
int
bsd_open_page_cache_files(
        unsigned int    user,
        struct global_profile **profile)
{
        char            *cache_path = "/var/vm/app_profile/";
        struct proc     *p;
        int             error;
        int             resid;
        off_t           resid_off;
        unsigned int    lru;
        vm_size_t       size;

        struct  vnode   *names_vp;
        struct  vnode   *data_vp;
        vm_offset_t     names_buf;
        vm_offset_t     buf_ptr;

        int             profile_names_length;
        int             profile_data_length;
        char            *profile_data_string;
        char            *profile_names_string;
        char            *substring;

        struct vattr    vattr;

        struct  profile_names_header *profile_header;
        kern_return_t   ret;

        struct nameidata nd_names;
        struct nameidata nd_data;

        int             i;


        p = current_proc();

restart:
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
                if((global_user_profile_cache.profiles[i].user == user)
                        && (global_user_profile_cache.profiles[i].data_vp
                                                                != NULL)) {
                        *profile = &global_user_profile_cache.profiles[i];
                        /* already in cache, we're done */
                        if ((*profile)->busy) {
                                /*
                                 * drop funnel and wait
                                 */
                                (void)tsleep((void *)
                                        *profile,
                                        PRIBIO, "app_profile", 0);
                                goto restart;
                        }
                        (*profile)->busy = 1;
                        (*profile)->age = global_user_profile_cache.age;
                        global_user_profile_cache.age+=1;
                        return 0;
                }
        }

        lru = global_user_profile_cache.age;
        *profile = NULL;
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
                /* Skip entry if it is in the process of being reused */
                if(global_user_profile_cache.profiles[i].data_vp ==
                                                (struct vnode *)0xFFFFFFFF)
                        continue;
                /* Otherwise grab the first empty entry */
                if(global_user_profile_cache.profiles[i].data_vp == NULL) {
                        *profile = &global_user_profile_cache.profiles[i];
                        (*profile)->age = global_user_profile_cache.age;
                        break;
                }
                /* Otherwise grab the oldest entry */
                if(global_user_profile_cache.profiles[i].age < lru) {
                        lru = global_user_profile_cache.profiles[i].age;
                        *profile = &global_user_profile_cache.profiles[i];
                }
        }

        /* Did we set it? */
        if (*profile == NULL) {
                /*
                 * No entries are available; this can only happen if all
                 * of them are currently in the process of being reused;
                 * if this happens, we sleep on the address of the first
                 * element, and restart.  This is less than ideal, but we
                 * know it will work because we know that there will be a
                 * wakeup on any entry currently in the process of being
                 * reused.
                 *
                 * XXX Recommend a two-handed clock and more than 3 total
                 * XXX cache entries at some point in the future.
                 */
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                 &global_user_profile_cache.profiles[0],
                        PRIBIO, "app_profile", 0);
                goto restart;
        }

        /*
         * If it's currently busy, we've picked the one at the end of the
         * LRU list, but it's currently being actively used.  We sleep on
         * its address and restart.
         */
        if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                        *profile,
                        PRIBIO, "app_profile", 0);
                goto restart;
        }
        (*profile)->busy = 1;
        (*profile)->user = user;

        /*
         * Put a dummy value in for now to make competing requests wait
         * above until we are finished.
         *
         * Save the data_vp before setting it, so we can set it before
         * we kmem_free() or vrele().  If we don't do this, then we
         * have a potential funnel race condition we have to deal with.
         */
        data_vp = (*profile)->data_vp;
        (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

        /*
         * Age the cache here in all cases; this guarantees that we won't
         * be reusing only one entry over and over, once the system reaches
         * steady-state.
         */
        global_user_profile_cache.age+=1;

        if(data_vp != NULL) {
                kmem_free(kernel_map,
                                (*profile)->buf_ptr, 4 * PAGE_SIZE);
                if ((*profile)->names_vp) {
                        vrele((*profile)->names_vp);
                        (*profile)->names_vp = NULL;
                }
                vrele(data_vp);
        }

        /* Try to open the appropriate user's profile files.  */
        /* If neither file is present, try to create them.    */
        /* If one file is present and the other not, fail.    */
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present.               */

        ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&profile_data_string, PATH_MAX);

        if(ret) {
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
                wakeup(*profile);
                return ENOMEM;
        }

        /* Split the buffer in half since we know the size of */
        /* our file path and our allocation is adequate for   */
        /* both file path names.                              */
        profile_names_string = profile_data_string + (PATH_MAX/2);


        strcpy(profile_data_string, cache_path);
        strcpy(profile_names_string, cache_path);
        profile_names_length = profile_data_length
                        = strlen(profile_data_string);
        substring = profile_data_string + profile_data_length;
        sprintf(substring, "%x_data", user);
        substring = profile_names_string + profile_names_length;
        sprintf(substring, "%x_names", user);

        /* We now have the absolute file names */

        ret = kmem_alloc(kernel_map,
                        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
        if(ret) {
                kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
                wakeup(*profile);
                return ENOMEM;
        }
        NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
                        UIO_SYSSPACE, profile_names_string, p);
        NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
                        UIO_SYSSPACE, profile_data_string, p);
        if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
#ifdef notdef
                printf("bsd_open_page_cache_files: CacheData file not found %s\n",
                        profile_data_string);
#endif
                kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
                wakeup(*profile);
                return error;
        }

        data_vp = nd_data.ni_vp;
        VOP_UNLOCK(data_vp, 0, p);

        if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
                printf("bsd_open_page_cache_files: Names file not found %s\n",
                        profile_names_string);
                kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                vrele(data_vp);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
                wakeup(*profile);
                return error;
        }
        names_vp = nd_names.ni_vp;

        if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
                printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                kmem_free(kernel_map,
                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                vput(names_vp);
                vrele(data_vp);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
                wakeup(*profile);
                return error;
        }

        size = vattr.va_size;
        if(size > 4 * PAGE_SIZE)
                size = 4 * PAGE_SIZE;
        buf_ptr = names_buf;
        resid_off = 0;

        while(size) {
                error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
                        size, resid_off,
                        UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
                if((error) || (size == resid)) {
                        if(!error) {
                                error = EINVAL;
                        }
                        kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);
                        kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                        vput(names_vp);
                        vrele(data_vp);
                        (*profile)->data_vp = NULL;
                        (*profile)->busy = 0;
                        wakeup(*profile);
                        return error;
                }
                buf_ptr += size-resid;
                resid_off += size-resid;
                size = resid;
        }

        VOP_UNLOCK(names_vp, 0, p);
        kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->names_vp = names_vp;
        (*profile)->data_vp = data_vp;
        (*profile)->buf_ptr = names_buf;
        return 0;

}

void
bsd_close_page_cache_files(
        struct global_profile *profile)
{
        profile->busy = 0;
        wakeup(profile);
}
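
/*
 * Usage sketch (illustrative): open and close bracket a lookup.  The
 * open call marks the per-user cache entry busy; bsd_close_page_cache_files()
 * clears the flag and wakes any thread sleeping in the open path.
 */
#if 0
        struct global_profile *uid_files;
        if (bsd_open_page_cache_files(user, &uid_files) == 0) {
                /* ... use uid_files->names_vp / data_vp / buf_ptr ... */
                bsd_close_page_cache_files(uid_files);
        }
#endif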

int
bsd_read_page_cache_file(
        unsigned int    user,
        int             *fid,
        int             *mod,
        char            *app_name,
        struct vnode    *app_vp,
        vm_offset_t     *buffer,
        vm_offset_t     *buf_size)
{

        boolean_t               funnel_state;

        struct proc     *p;
        int             error;
        int             resid;
        vm_size_t       size;

        off_t           profile;
        unsigned int    profile_size;

        vm_offset_t     names_buf;
        struct vattr    vattr;

        kern_return_t   ret;

        struct  vnode   *names_vp;
        struct  vnode   *data_vp;
        struct  vnode   *vp1;
        struct  vnode   *vp2;

        struct global_profile *uid_files;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

        /* Try to open the appropriate user's profile files.  */
        /* If neither file is present, try to create them.    */
        /* If one file is present and the other not, fail.    */
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present.               */


        error = bsd_open_page_cache_files(user, &uid_files);
        if(error) {
                thread_funnel_set(kernel_flock, funnel_state);
                return EINVAL;
        }

        p = current_proc();

        names_vp = uid_files->names_vp;
        data_vp = uid_files->data_vp;
        names_buf = uid_files->buf_ptr;


        /*
         * Get locks on both files; locking the vnode with the lower
         * address first gives every path the same lock order and so
         * avoids deadlock against other threads doing the same thing.
         */

        if((unsigned int)names_vp < (unsigned int)data_vp) {
                vp1 = names_vp;
                vp2 = data_vp;
        } else {
                vp1 = data_vp;
                vp2 = names_vp;
        }
        error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
        if(error) {
                printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
                return error;
        }
        error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
        if(error) {
                printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
                VOP_UNLOCK(vp1, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
                return error;
        }

        if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                printf("bsd_read_page_cache_file: Can't stat app file %s\n", app_name);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
                return error;
        }

        *fid = vattr.va_fileid;
        *mod = vattr.va_mtime.tv_sec;

        if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
                        (unsigned int) vattr.va_mtime.tv_sec,
                        vattr.va_fileid, &profile, &profile_size) == 0) {
                /* profile is an offset in the profile data base */
                /* It is zero if no profile data was found */

                if(profile_size == 0) {
                        *buffer = NULL;
                        *buf_size = 0;
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(uid_files);
                        thread_funnel_set(kernel_flock, funnel_state);
                        return 0;
                }
                ret = kmem_alloc(kernel_map, buffer, profile_size);
                if(ret) {
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(uid_files);
                        thread_funnel_set(kernel_flock, funnel_state);
                        return ENOMEM;
                }
                *buf_size = profile_size;
                while(profile_size) {
                        error = vn_rdwr(UIO_READ, data_vp,
                                (caddr_t) *buffer, profile_size,
                                profile, UIO_SYSSPACE, IO_NODELOCKED,
                                p->p_ucred, &resid, p);
                        if((error) || (profile_size == resid)) {
                                VOP_UNLOCK(names_vp, 0, p);
                                VOP_UNLOCK(data_vp, 0, p);
                                bsd_close_page_cache_files(uid_files);
                                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                                thread_funnel_set(kernel_flock, funnel_state);
                                return EINVAL;
                        }
                        profile += profile_size - resid;
                        profile_size = resid;
                }
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
                return 0;
        } else {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
                return EINVAL;
        }

}
 1336 
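The loops above depend on vn_rdwr() reporting, through resid, how many of the requested bytes were not transferred: each pass advances the offset by the bytes actually moved and retries with the remainder, and a pass that makes no progress at all (resid equal to the full request) is treated as an error. Below is a minimal userspace sketch of the same pattern, assuming an ordinary file descriptor; read_all() is a hypothetical helper, not anything in this file.

        #include <errno.h>
        #include <unistd.h>

        /* Read exactly len bytes at offset off, retrying short reads and
         * failing when no forward progress is made, mirroring the
         * (profile_size == resid) check in the kernel loop above. */
        static int
        read_all(int fd, char *buf, size_t len, off_t off)
        {
                while (len > 0) {
                        ssize_t n = pread(fd, buf, len, off);
                        if (n < 0) {
                                if (errno == EINTR)
                                        continue;       /* interrupted; retry */
                                return -1;
                        }
                        if (n == 0)
                                return -1;              /* premature EOF */
                        buf += n;
                        off += n;
                        len -= (size_t)n;
                }
                return 0;
        }
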
 1337 int
 1338 bsd_search_page_cache_data_base(
 1339         struct  vnode                   *vp,
 1340         struct profile_names_header     *database,
 1341         char                            *app_name,
 1342         unsigned int                    mod_date,
 1343         unsigned int                    inode,
 1344         off_t                           *profile,
 1345         unsigned int                    *profile_size)
 1346 {
 1347 
 1348         struct proc             *p;
 1349 
 1350         unsigned int            i;
 1351         struct profile_element  *element;
 1352         unsigned int            ele_total;
 1353         unsigned int            extended_list = 0;
 1354         off_t                   file_off = 0;
 1355         unsigned int            size;
 1356         off_t                   resid_off;
 1357         int                     resid;
 1358         vm_offset_t             local_buf = NULL;
 1359 
 1360         int                     error;
 1361         kern_return_t           ret;
 1362 
 1363         p = current_proc();
 1364 
 1365         if(((vm_offset_t)database->element_array) !=
 1366                                 sizeof(struct profile_names_header)) {
 1367                 return EINVAL;
 1368         }
 1369         element = (struct profile_element *)(
 1370                         (vm_offset_t)database->element_array + 
 1371                                                 (vm_offset_t)database);
 1372 
 1373         ele_total = database->number_of_profiles;
 1374         
 1375         *profile = 0;
 1376         *profile_size = 0;
 1377         while(ele_total) {
 1378                 /* note: code assumes header + n*ele comes out on a page boundary */
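                      /* The scan proceeds in 4-page windows: a full window
                       * holds (PAGE_SIZE * 4) / sizeof(struct profile_element)
                       * entries, and the first window, which also contains
                       * the header (sized by the assumption above at exactly
                       * one element slot), holds one entry fewer. */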
 1379                 if(((local_buf == 0) && (sizeof(struct profile_names_header) + 
 1380                         (ele_total * sizeof(struct profile_element))) 
 1381                                         > (PAGE_SIZE * 4)) ||
 1382                         ((local_buf != 0) && 
 1383                                 (ele_total * sizeof(struct profile_element))
 1384                                          > (PAGE_SIZE * 4))) {
 1385                         extended_list = ele_total;
 1386                         if(element == (struct profile_element *)
 1387                                 ((vm_offset_t)database->element_array + 
 1388                                                 (vm_offset_t)database)) {
 1389                                 ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
 1390                         } else {
 1391                                 ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
 1392                         }
 1393                         extended_list -= ele_total;
 1394                 }
 1395                 for (i=0; i<ele_total; i++) {
 1396                         if((mod_date == element[i].mod_date) 
 1397                                         && (inode == element[i].inode)) {
 1398                                 if(strncmp(element[i].name, app_name, 12) == 0) {
 1399                                         *profile = element[i].addr;
 1400                                         *profile_size = element[i].size;
 1401                                         if(local_buf != NULL) {
 1402                                                 kmem_free(kernel_map, 
 1403                                                         (vm_offset_t)local_buf, 4 * PAGE_SIZE);
 1404                                         }
 1405                                         return 0;
 1406                                 }
 1407                         }
 1408                 }
 1409                 if(extended_list == 0)
 1410                         break;
 1411                 if(local_buf == NULL) {
 1412                         ret = kmem_alloc(kernel_map,
 1413                                 (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
 1414                         if(ret != KERN_SUCCESS) {
 1415                                 return ENOMEM;
 1416                         }
 1417                 }
 1418                 element = (struct profile_element *)local_buf;
 1419                 ele_total = extended_list;
 1420                 extended_list = 0;
 1421                 file_off +=  4 * PAGE_SIZE;
 1422                 if((ele_total * sizeof(struct profile_element)) > 
 1423                                                         (PAGE_SIZE * 4)) {
 1424                         size = PAGE_SIZE * 4;
 1425                 } else {
 1426                         size = ele_total * sizeof(struct profile_element);
 1427                 }
 1428                 resid_off = 0;
 1429                 while(size) {
 1430                         error = vn_rdwr(UIO_READ, vp, 
 1431                                 CAST_DOWN(caddr_t, (local_buf + resid_off)),
 1432                                 size, file_off + resid_off, UIO_SYSSPACE, 
 1433                                 IO_NODELOCKED, p->p_ucred, &resid, p);
 1434                         if((error) || (size == resid)) {
 1435                                 if(local_buf != NULL) {
 1436                                         kmem_free(kernel_map, 
 1437                                                 (vm_offset_t)local_buf, 
 1438                                                 4 * PAGE_SIZE);
 1439                                 }
 1440                                 return EINVAL;
 1441                         }
 1442                         resid_off += size-resid;
 1443                         size = resid;
 1444                 }
 1445         }
 1446         if(local_buf != NULL) {
 1447                 kmem_free(kernel_map, 
 1448                         (vm_offset_t)local_buf, 4 * PAGE_SIZE);
 1449         }
 1450         return 0;
 1451 }
 1452 
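The database layout this search walks is implied by the accesses above: a names file that begins with a profile_names_header whose element_array field must equal the header's own size (so the element array starts immediately after it), followed by number_of_profiles fixed-size entries, each naming a profile stored in the companion data file. The sketch below reconstructs the two records purely from their uses in this listing; field types, ordering, and padding are assumptions, since the authoritative definitions live in a header file that is not shown here.

        /* Reconstructed from usage; not the authoritative definitions. */
        struct profile_names_header {
                unsigned int    number_of_profiles; /* elements that follow */
                unsigned int    user_id;            /* owner of the database */
                unsigned int    version;            /* written as 1 on creation */
                unsigned int    element_array;      /* byte offset of the first
                                                       element; validated against
                                                       the header size */
                unsigned int    spare1;
                unsigned int    spare2;
                unsigned int    spare3;
        };

        struct profile_element {
                off_t           addr;       /* profile offset in the data file */
                unsigned int    size;       /* profile size in bytes */
                unsigned int    mod_date;   /* mtime of the profiled binary */
                unsigned int    inode;      /* inode of the profiled binary */
                char            name[12];   /* compared with strncmp(..., 12) */
        };
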
 1453 int
 1454 bsd_write_page_cache_file(
 1455         unsigned int    user,
 1456         char            *file_name,
 1457         caddr_t         buffer,
 1458         vm_size_t       size,
 1459         int             mod,
 1460         int             fid)
 1461 {
 1462         struct proc             *p;
 1463         struct nameidata        nd;
 1464         struct vnode            *vp = 0; 
 1465         int                     resid;
 1466         off_t                   resid_off;
 1467         int                     error;
 1468         boolean_t               funnel_state;
 1469         struct vattr            vattr;
 1470         struct vattr            data_vattr;
 1471 
 1472         off_t                           profile;
 1473         unsigned int                    profile_size;
 1474 
 1475         vm_offset_t     names_buf;
 1476         struct  vnode   *names_vp;
 1477         struct  vnode   *data_vp;
 1478         struct  vnode   *vp1;
 1479         struct  vnode   *vp2;
 1480 
 1481         struct  profile_names_header *profile_header;
 1482         off_t                   name_offset;
 1483 
 1484         struct global_profile *uid_files;
 1485 
 1486 
 1487         funnel_state = thread_funnel_set(kernel_flock, TRUE);
 1488 
 1489 
 1490 
 1491         error = bsd_open_page_cache_files(user, &uid_files);
 1492         if(error) {
 1493                 thread_funnel_set(kernel_flock, funnel_state);
 1494                 return EINVAL;
 1495         }
 1496 
 1497         p = current_proc();
 1498 
 1499         names_vp = uid_files->names_vp;
 1500         data_vp = uid_files->data_vp;
 1501         names_buf = uid_files->buf_ptr;
 1502 
 1503         /* 
 1504          * Lock both files; to avoid deadlock, always take the vnode with the lower address first
 1505          */
 1506 
 1507         if((unsigned int)names_vp < (unsigned int)data_vp) {
 1508                 vp1 = names_vp;
 1509                 vp2 = data_vp;
 1510         } else {
 1511                 vp1 = data_vp;
 1512                 vp2 = names_vp;
 1513         }
 1514 
 1515         error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
 1516         if(error) {
 1517                 printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
 1518                 bsd_close_page_cache_files(uid_files);
 1519                 thread_funnel_set(kernel_flock, funnel_state);
 1520                 return error;
 1521         }
 1522         error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
 1523         if(error) {
 1524                 printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
 1525                 VOP_UNLOCK(vp1, 0, p);
 1526                 bsd_close_page_cache_files(uid_files);
 1527                 thread_funnel_set(kernel_flock, funnel_state);
 1528                 return error;
 1529         }
 1530 
 1531         /* Stat data file for size */
 1532 
 1533         if ((error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p))) {
 1534                 VOP_UNLOCK(names_vp, 0, p);
 1535                 VOP_UNLOCK(data_vp, 0, p);
 1536                 printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
 1537                 bsd_close_page_cache_files(uid_files);
 1538                 thread_funnel_set(kernel_flock, funnel_state);
 1539                 return error;
 1540         }
 1541                 
 1542         if (bsd_search_page_cache_data_base(names_vp, 
 1543                         (struct profile_names_header *)names_buf, 
 1544                         file_name, (unsigned int) mod,  
 1545                         fid, &profile, &profile_size) == 0) {
 1546                 /* profile is an offset into the profile database */
 1547                 /* It is zero if no profile data was found */
 1548                 
 1549                 if(profile_size == 0) {
 1550                         unsigned int    header_size;
 1551                         vm_offset_t     buf_ptr;
 1552 
 1553                         /* No existing profile; this is the write case */
 1554 
 1555                         /* use the header to locate the slot for the new entry */
 1556                         profile_header = 
 1557                                 (struct profile_names_header *)names_buf;
 1558                         name_offset = sizeof(struct profile_names_header) + 
 1559                                 (sizeof(struct profile_element) 
 1560                                         * profile_header->number_of_profiles);
 1561                         profile_header->number_of_profiles += 1;
 1562 
 1563                         if(name_offset < PAGE_SIZE * 4) {
 1564                                 struct profile_element  *name;
 1565                                 /* write new entry */
 1566                                 name = (struct profile_element *)
 1567                                         (names_buf + (vm_offset_t)name_offset);
 1568                                 name->addr =  data_vattr.va_size;
 1569                                 name->size = size;
 1570                                 name->mod_date = mod;
 1571                                 name->inode = fid;
 1572                                 strncpy (name->name, file_name, 12);
 1573                         } else {
 1574                                 unsigned int    ele_size;
 1575                                 struct profile_element  name;
 1576                                 /* write new entry */
 1577                                 name.addr = data_vattr.va_size;
 1578                                 name.size = size;
 1579                                 name.mod_date = mod;
 1580                                 name.inode = fid;
 1581                                 strncpy (name.name, file_name, 12);
 1582                                 /* write element out separately */
 1583                                 ele_size = sizeof(struct profile_element);
 1584                                 buf_ptr = (vm_offset_t)&name;
 1585                                 resid_off = name_offset;
 1586 
 1587                                 while(ele_size) {
 1588                                         error = vn_rdwr(UIO_WRITE, names_vp, 
 1589                                                 (caddr_t)buf_ptr, 
 1590                                                 ele_size, resid_off, 
 1591                                                 UIO_SYSSPACE, IO_NODELOCKED, 
 1592                                                 p->p_ucred, &resid, p);
 1593                                         if(error) {
 1594                                                 printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
 1595                                                 VOP_UNLOCK(names_vp, 0, p);
 1596                                                 VOP_UNLOCK(data_vp, 0, p);
 1597                                                 bsd_close_page_cache_files(
 1598                                                         uid_files);
 1599                                                 thread_funnel_set(
 1600                                                         kernel_flock, 
 1601                                                         funnel_state);
 1602                                                 return error;
 1603                                         }
 1604                                         buf_ptr += (vm_offset_t)
 1605                                                         ele_size-resid;
 1606                                         resid_off += ele_size-resid;
 1607                                         ele_size = resid;
 1608                                 }
 1609                         }
 1610 
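                              /* If the new element fit inside the cached
                               * 4-page names buffer, rewriting the header
                               * below also flushes that element; otherwise
                               * the element was already written out
                               * separately and only the header proper
                               * needs to be rewritten. */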
 1611                         if(name_offset < PAGE_SIZE * 4) {
 1612                                 header_size = name_offset + 
 1613                                         sizeof(struct profile_element);
 1614                                 
 1615                         } else {
 1616                                 header_size = 
 1617                                         sizeof(struct profile_names_header);
 1618                         }
 1619                         buf_ptr = (vm_offset_t)profile_header;
 1620                         resid_off = 0;
 1621 
 1622                         /* write names file header */
 1623                         while(header_size) {
 1624                                 error = vn_rdwr(UIO_WRITE, names_vp, 
 1625                                         (caddr_t)buf_ptr, 
 1626                                         header_size, resid_off, 
 1627                                         UIO_SYSSPACE, IO_NODELOCKED, 
 1628                                         p->p_ucred, &resid, p);
 1629                                 if(error) {
 1630                                         VOP_UNLOCK(names_vp, 0, p);
 1631                                         VOP_UNLOCK(data_vp, 0, p);
 1632                                         printf("bsd_write_page_cache_file: Can't write header %x\n", user);
 1633                                         bsd_close_page_cache_files(
 1634                                                 uid_files);
 1635                                         thread_funnel_set(
 1636                                                 kernel_flock, funnel_state);
 1637                                         return error;
 1638                                 }
 1639                                 buf_ptr += (vm_offset_t)header_size-resid;
 1640                                 resid_off += header_size-resid;
 1641                                 header_size = resid;
 1642                         }
 1643                         /* write profile to data file */
 1644                         resid_off = data_vattr.va_size;
 1645                         while(size) {
 1646                                 error = vn_rdwr(UIO_WRITE, data_vp, 
 1647                                         (caddr_t)buffer, size, resid_off, 
 1648                                         UIO_SYSSPACE, IO_NODELOCKED, 
 1649                                         p->p_ucred, &resid, p);
 1650                                 if(error) {
 1651                                         VOP_UNLOCK(names_vp, 0, p);
 1652                                         VOP_UNLOCK(data_vp, 0, p);
 1653                                         printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
 1654                                         bsd_close_page_cache_files(
 1655                                                 uid_files);
 1656                                         thread_funnel_set(
 1657                                                 kernel_flock, funnel_state);
 1658                                         return error;
 1659                                 }
 1660                                 buffer += size-resid;
 1661                                 resid_off += size-resid;
 1662                                 size = resid;
 1663                         }
 1664                         VOP_UNLOCK(names_vp, 0, p);
 1665                         VOP_UNLOCK(data_vp, 0, p);
 1666                         bsd_close_page_cache_files(uid_files);
 1667                         thread_funnel_set(kernel_flock, funnel_state);
 1668                         return 0;
 1669                 }
 1670                 /* Someone else wrote a twin profile before us */
 1671                 VOP_UNLOCK(names_vp, 0, p);
 1672                 VOP_UNLOCK(data_vp, 0, p);
 1673                 bsd_close_page_cache_files(uid_files);
 1674                 thread_funnel_set(kernel_flock, funnel_state);
 1675                 return 0;
 1676         } else {                
 1677                 VOP_UNLOCK(names_vp, 0, p);
 1678                 VOP_UNLOCK(data_vp, 0, p);
 1679                 bsd_close_page_cache_files(uid_files);
 1680                 thread_funnel_set(kernel_flock, funnel_state);
 1681                 return EINVAL;
 1682         }
 1683         
 1684 }
 1685 
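Both the read and the write paths lock the two vnodes in ascending address order. This is the standard deadlock-avoidance discipline for taking multiple locks: if every thread acquires any pair in one agreed global order, no two threads can each hold one lock of the pair while waiting for the other. Below is a minimal sketch of the same rule with POSIX threads; lock_pair() is a hypothetical name, not anything in this file.

        #include <pthread.h>
        #include <stdint.h>

        /* Acquire two mutexes in a single global order (by address),
         * the same discipline applied to vp1/vp2 above. */
        static void
        lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
        {
                if ((uintptr_t)a > (uintptr_t)b) {
                        pthread_mutex_t *tmp = a;

                        a = b;
                        b = tmp;
                }
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        }
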
 1686 int
 1687 prepare_profile_database(int    user)
 1688 {
 1689         char            *cache_path = "/var/vm/app_profile/";
 1690         struct proc     *p;
 1691         int             error;
 1692         int             resid;
 1693         off_t           resid_off;
 1694         unsigned int    lru;
 1695         vm_size_t       size;
 1696 
 1697         struct  vnode   *names_vp;
 1698         struct  vnode   *data_vp;
 1699         vm_offset_t     names_buf;
 1700         vm_offset_t     buf_ptr;
 1701 
 1702         int             profile_names_length;
 1703         int             profile_data_length;
 1704         char            *profile_data_string;
 1705         char            *profile_names_string;
 1706         char            *substring;
 1707 
 1708         struct vattr    vattr;
 1709 
 1710         struct  profile_names_header *profile_header;
 1711         kern_return_t   ret;
 1712 
 1713         struct nameidata nd_names;
 1714         struct nameidata nd_data;
 1715 
 1716         int             i;
 1717 
 1718         p = current_proc();
 1719 
 1720         ret = kmem_alloc(kernel_map,
 1721                 (vm_offset_t *)&profile_data_string, PATH_MAX);
 1722 
 1723         if(ret) {
 1724                 return ENOMEM;
 1725         }
 1726 
 1727         /* Split the buffer in half: the cache-path prefix is short, */
 1728         /* so half of PATH_MAX is adequate for each of the two       */
 1729         /* generated file names                                      */
 1730         profile_names_string = profile_data_string + (PATH_MAX/2);
 1731 
 1732 
 1733         strcpy(profile_data_string, cache_path);
 1734         strcpy(profile_names_string, cache_path);
 1735         profile_names_length = profile_data_length 
 1736                         = strlen(profile_data_string);
 1737         substring = profile_data_string + profile_data_length;
 1738         sprintf(substring, "%x_data", user);
 1739         substring = profile_names_string + profile_names_length;
 1740         sprintf(substring, "%x_names", user);
 1741 
 1742         /* We now have the absolute file names */
 1743 
 1744         ret = kmem_alloc(kernel_map,
 1745                         (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
 1746         if(ret) {
 1747                 kmem_free(kernel_map, 
 1748                                 (vm_offset_t)profile_data_string, PATH_MAX);
 1749                 return ENOMEM;
 1750         }
 1751 
 1752         NDINIT(&nd_names, LOOKUP, FOLLOW, 
 1753                         UIO_SYSSPACE, profile_names_string, p);
 1754         NDINIT(&nd_data, LOOKUP, FOLLOW,
 1755                         UIO_SYSSPACE, profile_data_string, p);
 1756 
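              /* O_CREAT | O_EXCL: if the data file already exists this
               * open fails, and the failure is treated as success
               * (return 0), on the assumption that the database has
               * already been prepared. */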
 1757         if ((error = vn_open(&nd_data, 
 1758                         O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR))) {
 1759                         kmem_free(kernel_map, 
 1760                                         (vm_offset_t)names_buf, 4 * PAGE_SIZE);
 1761                         kmem_free(kernel_map, 
 1762                                 (vm_offset_t)profile_data_string, PATH_MAX);
 1763                         return 0;
 1764         }
 1765 
 1766         data_vp = nd_data.ni_vp;
 1767         VOP_UNLOCK(data_vp, 0, p);
 1768 
 1769         if ((error = vn_open(&nd_names, 
 1770                         O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR))) {
 1771                         printf("prepare_profile_database: Can't create CacheNames %s\n",
 1772                                 profile_names_string);
 1773                         kmem_free(kernel_map, 
 1774                                         (vm_offset_t)names_buf, 4 * PAGE_SIZE);
 1775                         kmem_free(kernel_map, 
 1776                                 (vm_offset_t)profile_data_string, PATH_MAX);
 1777                         vrele(data_vp);
 1778                         return error;
 1779         }
 1780 
 1781         names_vp = nd_names.ni_vp;
 1782 
 1783 
 1784         /* Write Header for new names file */
 1785 
 1786         profile_header = (struct profile_names_header *)names_buf;
 1787 
 1788         profile_header->number_of_profiles = 0;
 1789         profile_header->user_id =  user;
 1790         profile_header->version = 1;
 1791         profile_header->element_array = 
 1792                                 sizeof(struct profile_names_header);
 1793         profile_header->spare1 = 0;
 1794         profile_header->spare2 = 0;
 1795         profile_header->spare3 = 0;
 1796 
 1797         size = sizeof(struct profile_names_header);
 1798         buf_ptr = (vm_offset_t)profile_header;
 1799         resid_off = 0;
 1800 
 1801         while(size) {
 1802                 error = vn_rdwr(UIO_WRITE, names_vp, 
 1803                                 (caddr_t)buf_ptr, size, resid_off,
 1804                                 UIO_SYSSPACE, IO_NODELOCKED, 
 1805                                 p->p_ucred, &resid, p);
 1806                 if(error) {
 1807                         printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
 1808                         kmem_free(kernel_map, 
 1809                                 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
 1810                         kmem_free(kernel_map, 
 1811                                 (vm_offset_t)profile_data_string, 
 1812                                 PATH_MAX);
 1813                         vput(names_vp);
 1814                         vrele(data_vp);
 1815                         return error;
 1816                 }
 1817                 buf_ptr += size-resid;
 1818                 resid_off += size-resid;
 1819                 size = resid;
 1820         }
 1821 
 1822         VATTR_NULL(&vattr);
 1823         vattr.va_uid = user;
 1824         error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
 1825         if(error) {
 1826                 printf("prepare_profile_database: "
 1827                         "Can't set user %s\n", profile_names_string);
 1828         }
 1829         vput(names_vp);
 1830         
 1831         error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
 1832         if(error) {
 1833                 vrele(data_vp);
 1834                 printf("prepare_profile_database: cannot lock data file %s\n",
 1835                         profile_data_string);
 1836                 kmem_free(kernel_map, 
 1837                         (vm_offset_t)profile_data_string, PATH_MAX);
 1838                 kmem_free(kernel_map, 
 1839                         (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                      /* the buffers are freed above; without this return
                       * the code would fall through and use the unlocked
                       * vnode and the freed memory */
                      return error;
 1840         }
 1841         VATTR_NULL(&vattr);
 1842         vattr.va_uid = user;
 1843         error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
 1844         if(error) {
 1845                 printf("prepare_profile_database: "
 1846                         "Can't set user %s\n", profile_data_string);
 1847         }
 1848         
 1849         vput(data_vp);
 1850         kmem_free(kernel_map, 
 1851                         (vm_offset_t)profile_data_string, PATH_MAX);
 1852         kmem_free(kernel_map, 
 1853                         (vm_offset_t)names_buf, 4 * PAGE_SIZE);
 1854         return 0;
 1855 
 1856 }
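
prepare_profile_database() derives both file names from the user id: one PATH_MAX allocation is split in half, the cache directory prefix is copied into each half, and the id is appended in hex with a _data or _names suffix. Below is a standalone sketch of the same construction, assuming, as the code above does, that PATH_MAX/2 is enough for each name; build_paths() is a hypothetical name.

        #include <limits.h>
        #include <stdio.h>

        /* e.g. user 0x1f5 yields /var/vm/app_profile/1f5_data and
         * /var/vm/app_profile/1f5_names; each target buffer must be at
         * least PATH_MAX/2 bytes. */
        static void
        build_paths(unsigned int user, char *data_path, char *names_path)
        {
                snprintf(data_path, PATH_MAX / 2,
                    "/var/vm/app_profile/%x_data", user);
                snprintf(names_path, PATH_MAX / 2,
                    "/var/vm/app_profile/%x_names", user);
        }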
