FreeBSD/Linux Kernel Cross Reference
sys/norma/xmm_svm.c


/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log:        xmm_svm.c,v $
 * Revision 2.5  92/03/10  16:29:50  jsb
 *      Merged in norma branch changes as of NORMA_MK7.
 *      [92/03/09  12:51:57  jsb]
 *
 * Revision 2.4.3.3  92/02/21  11:28:23  jsb
 *      Replaced xmm_reply_allocate_mobj with m_svm_do_request, which now takes
 *      a reference to mobj. m_svm_lock_completed now deallocates reply as well
 *      as reference to mobj.
 *      [92/02/18  17:31:27  jsb]
 *
 *      Cosmetic changes, including vm_page_size -> PAGE_SIZE.
 *      [92/02/18  08:01:18  jsb]
 *
 *      Explicitly provide name parameter to xmm_decl macro.
 *      Added MOBJ_STATE_TERMINATED to detect init/terminate race.
 *      Added memory_object parameter to xmm_svm_create, and memory_object
 *      field to struct mobj, so that m_svm_terminate can call
 *      xmm_object_release on memory_object. Move M_TERMINATE call
 *      to new routine xmm_svm_destroy, which is called from xmm_object
 *      module only when there are no references to xmm object.
 *      This fixes race between xmm_object_by_memory_object (where someone
 *      decides to use our existing svm stack) and m_svm_terminate
 *      (where we used to tear down the stack as soon as all the kernels
 *      we knew about had terminated the object).
 *      [92/02/16  15:50:31  jsb]
 *
 *      Changed copy strategy management to handle (naively)
 *      MEMORY_OBJECT_COPY_TEMPORARY (by passing it up unchanged).
 *      This will break in its current form when we enable VM_INHERIT_SHARE.
 *      (Added appropriate checks to panic in this case.) Removed dead
 *      routines xmm_svm_{set_access,initialize}. Changed debugging printfs.
 *      [92/02/11  11:32:58  jsb]
 *
 *      Use new xmm_decl, and new memory_object_name and deallocation protocol.
 *      Use xmm_buffer layer to buffer data writes of migrating pages.
 *      General cleanup.
 *      [92/02/09  13:58:24  jsb]
 *
 * Revision 2.4.3.1  92/01/21  21:54:57  jsb
 *      De-linted. Supports new (dlb) memory object routines.
 *      Supports arbitrary reply ports to lock_request, etc.
 *      Converted mach_port_t (and port_t) to ipc_port_t.
 *      [92/01/20  17:46:55  jsb]
 *
 *      Fixes from OSF.
 *      [92/01/17  14:15:53  jsb]
 *
 * Revision 2.4.1.1  92/01/15  12:17:45  jeffreyh
 *      Deallocate memory_object_name port when not propagating
 *      termination. (dlb)
 *
 * Revision 2.4  91/08/03  18:19:47  jsb
 *      Added missing type cast.
 *      [91/07/17  14:07:46  jsb]
 *
 * Revision 2.3  91/07/01  08:26:40  jsb
 *      Now allow objects to grow in size (as temporary objects do).
 *      Merged user_t and kobj structures. Do garbage collection.
 *      Now pass up all set_attribute calls, not just first.
 *      Use zone for request structures.
 *      [91/06/29  15:43:16  jsb]
 *
 * Revision 2.2  91/06/17  15:48:43  jsb
 *      First checkin.
 *      [91/06/17  11:04:03  jsb]
 *
 */
/*
 *      File:   norma/xmm_svm.c
 *      Author: Joseph S. Barrera III
 *      Date:   1991
 *
 *      Xmm layer providing consistent shared virtual memory.
 */

#ifdef  KERNEL
#include <norma/xmm_obj.h>
#include <mach/vm_param.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#else   /* KERNEL */
#include <xmm_obj.h>
#endif  /* KERNEL */

#define dprintf xmm_svm_dprintf

#define USE_XMM_BUFFER  1

typedef struct request *        request_t;

#define REQUEST_NULL            ((request_t) 0)

#define MOBJ_STATE_UNCALLED     0
#define MOBJ_STATE_CALLED       1
#define MOBJ_STATE_READY        2
#define MOBJ_STATE_TERMINATED   3

#define DATA_NONE               ((vm_offset_t) 0)
#define DATA_UNAVAILABLE        ((vm_offset_t) 1)
#define DATA_ERROR              ((vm_offset_t) 2)

#define K                       ((struct kobj *) k)

/*
 * lock[] is set when the pager gives us a message.
 * prot[] is set when we send messages to kernels;
 * it should simply reflect the max of all kobj->prot.
 */
struct mobj {
        struct xmm_obj  obj;
        xmm_obj_t       kobj_list;
        int             state;
        unsigned int    num_pages;
        request_t       request;
        vm_prot_t       *prot;                  /* kernel access */
        vm_prot_t       *lock;                  /* lock by pager */
        boolean_t       may_cache;
        ipc_port_t      memory_object;          /* for xmm_object_release */
        ipc_port_t      memory_object_name;     /* at most one send right */
        memory_object_copy_strategy_t
                        copy_strategy;
};

union who {
        xmm_obj_t       kobj;
        xmm_reply_t     reply;
};

/*
 * XXX some of these fields could be aliased to save space
 * XXX eg: needs_data,should_clean; lock_value,desired_access
 *
 * XXX should probably add ref counts to kobjs....
 */
struct request {
        union who       who;
        int             m_count;                /* -> m_yield_count */
        int             k_count;                /* -> m_yield_count */
        boolean_t       is_kernel;
        boolean_t       needs_data;             /* ours alone */
        boolean_t       should_clean;           /* same as needs_data? */
        boolean_t       should_flush;
        vm_prot_t       desired_access;
        vm_prot_t       lock_value;
        vm_offset_t     offset;                 /* -> page */
        request_t       next_eq;
        request_t       next_ne;
};

struct kobj {
        struct xmm_obj  obj;
        unsigned int    num_pages;              /* needed for deallocation */
        vm_prot_t       *prot;
        xmm_obj_t       next;
};
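
/*
 * Illustrative sketch, not part of the original file: the comment above
 * struct mobj implies the invariant that MOBJ->prot[page] is the union
 * (max) of every kernel's prot[page].  A hypothetical debug helper along
 * these lines could check it; SVM_DEBUG_CHECKS is an assumed, made-up
 * switch, and MOBJ/K are the accessor macros used throughout this file.
 */
#if     SVM_DEBUG_CHECKS
void
m_svm_check_prot_invariant(mobj, page)
        xmm_obj_t mobj;
        int page;
{
        xmm_obj_t k;
        vm_prot_t prot = VM_PROT_NONE;

        /* accumulate the access every kernel currently holds */
        for (k = MOBJ->kobj_list; k; k = K->next) {
                prot |= K->prot[page];
        }
        assert(MOBJ->prot[page] == prot);
}
#endif  /* SVM_DEBUG_CHECKS */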

#define m_msvm_init             m_svm_init
#define m_msvm_terminate        m_svm_terminate
#define m_msvm_copy             m_invalid_copy
#define m_msvm_data_request     m_svm_data_request
#define m_msvm_data_unlock      m_svm_data_unlock
#define m_msvm_data_write       m_svm_data_write
#define m_msvm_lock_completed   m_invalid_lock_completed
#define m_msvm_supply_completed m_invalid_supply_completed
#define m_msvm_data_return      m_invalid_data_return
#define m_msvm_change_completed m_invalid_change_completed
#define k_msvm_data_unavailable k_svm_data_unavailable
#define k_msvm_get_attributes   k_invalid_get_attributes
#define k_msvm_lock_request     k_svm_lock_request
#define k_msvm_data_error       k_svm_data_error
#define k_msvm_set_ready        k_svm_set_ready
#define k_msvm_destroy          k_svm_destroy
#define k_msvm_data_supply      k_svm_data_supply
#define m_msvm_deallocate       m_svm_deallocate

xmm_decl(msvm, "msvm", sizeof(struct mobj));

#define m_ksvm_init             m_invalid_init
#define m_ksvm_terminate        m_invalid_terminate
#define m_ksvm_copy             m_invalid_copy
#define m_ksvm_data_request     m_invalid_data_request
#define m_ksvm_data_unlock      m_invalid_data_unlock
#define m_ksvm_data_write       m_invalid_data_write
#define m_ksvm_lock_completed   m_svm_lock_completed
#define m_ksvm_supply_completed m_invalid_supply_completed
#define m_ksvm_data_return      m_invalid_data_return
#define m_ksvm_change_completed m_invalid_change_completed
#define k_ksvm_data_unavailable k_svm_data_unavailable
#define k_ksvm_get_attributes   k_invalid_get_attributes
#define k_ksvm_lock_request     k_svm_lock_request
#define k_ksvm_data_error       k_svm_data_error
#define k_ksvm_set_ready        k_svm_set_ready
#define k_ksvm_destroy          k_svm_destroy
#define k_ksvm_data_supply      k_svm_data_supply
#define m_ksvm_deallocate       k_svm_deallocate

xmm_decl(ksvm, "ksvm", sizeof(struct kobj));

extern void     xmm_object_release();

boolean_t       m_svm_add_request();
request_t       m_svm_lookup_request();
void            m_svm_satisfy_request();
void            m_svm_satisfy_kernel_request();
void            m_svm_satisfy_pager_request();
void            m_svm_process_request();
void            m_svm_process_kernel_request();
void            m_svm_process_pager_request();

zone_t          xmm_svm_request_zone;

int C_mobj_prot = 0;
int C_mobj_lock = 0;
int C_user_prot = 0;

/* XXX should be implemented by kalloc.c */
/* XXX should kalloc have asm help for round-to-power-of-two? */
krealloc(old_buf_p, old_size, new_size, counter)
        char **old_buf_p;
        vm_size_t old_size;
        vm_size_t new_size;
        int *counter;
{
        char *new_buf;

        new_buf = (char *) kalloc(new_size);
        if (new_buf == (char *) 0) {
                panic("krealloc");
        }
        if (old_size > 0) {
                bcopy(*old_buf_p, new_buf, old_size);
                kfree(*old_buf_p, old_size);
        }
        *counter += (new_size - old_size);
        *old_buf_p = new_buf;
}
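
/*
 * Note that unlike a user-level realloc, this krealloc always allocates
 * a fresh buffer, copies the old contents, and frees the old buffer;
 * *counter accumulates the net number of bytes held, which the C_*
 * statistics counters above track.
 */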

void
m_svm_extend(mobj, new_num_pages)
        xmm_obj_t mobj;
        unsigned int new_num_pages;
{
        xmm_obj_t kobj;
        int page, i;
        unsigned int old_num_pages = MOBJ->num_pages;

        for (i = 4; i < new_num_pages; i += i) {
                continue;
        }
        new_num_pages = i;
        MOBJ->num_pages = new_num_pages;
/*      assert(new_num_pages > old_num_pages);*/
        krealloc((char **) &MOBJ->prot,
                 old_num_pages * sizeof(vm_prot_t),
                 new_num_pages * sizeof(vm_prot_t),
                 &C_mobj_prot);
        krealloc((char **) &MOBJ->lock,
                 old_num_pages * sizeof(vm_prot_t),
                 new_num_pages * sizeof(vm_prot_t),
                 &C_mobj_lock);
        for (kobj = MOBJ->kobj_list; kobj; kobj = KOBJ->next) {
                assert(KOBJ->num_pages == old_num_pages);
                KOBJ->num_pages = new_num_pages;
                krealloc((char **) &KOBJ->prot,
                         old_num_pages * sizeof(vm_prot_t),
                         new_num_pages * sizeof(vm_prot_t),
                         &C_user_prot);
        }
        for (page = old_num_pages; page < new_num_pages; page++) {
                MOBJ->prot[page] = VM_PROT_NONE;
                MOBJ->lock[page] = VM_PROT_ALL;
                for (kobj = MOBJ->kobj_list; kobj; kobj = KOBJ->next) {
                        KOBJ->prot[page] = VM_PROT_NONE;
                }
        }
}
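
/*
 * The loop above rounds the requested size up to a power of two (with a
 * minimum of 4 pages): for example, a request to grow to 9 pages
 * allocates room for 16, so repeated one-page extensions cost O(log n)
 * reallocations rather than one per page.
 */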

kern_return_t
xmm_svm_create(old_mobj, memory_object, new_mobj)
        xmm_obj_t old_mobj;
        ipc_port_t memory_object;
        xmm_obj_t *new_mobj;
{
        xmm_obj_t mobj;
        kern_return_t kr;

#if     USE_XMM_BUFFER
        kr = xmm_buffer_create(old_mobj, &old_mobj);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
#endif  /* USE_XMM_BUFFER */
        kr = xmm_obj_allocate(&msvm_class, old_mobj, &mobj);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        MOBJ->num_pages = 0;
        MOBJ->kobj_list = XMM_OBJ_NULL;
        MOBJ->prot = (vm_prot_t *) 0;
        MOBJ->lock = (vm_prot_t *) 0;
        MOBJ->request = REQUEST_NULL;
        MOBJ->memory_object = memory_object;
        MOBJ->memory_object_name = IP_NULL;
        *new_mobj = mobj;
        return KERN_SUCCESS;
}

m_svm_init(mobj, k_kobj, pagesize, internal, size)
        xmm_obj_t mobj;
        xmm_obj_t k_kobj;
        vm_size_t pagesize;
        boolean_t internal;
        vm_size_t size;
{
        xmm_obj_t kobj;

#ifdef  lint
        M_INIT(mobj, k_kobj, pagesize, internal, size);
#endif  /* lint */
        assert(pagesize == PAGE_SIZE);
        if (xmm_obj_allocate(&ksvm_class, XMM_OBJ_NULL, &kobj)) {
                panic("m_svm_init");
        }
        xmm_kobj_link(kobj, k_kobj);

        KOBJ->num_pages = MOBJ->num_pages;
        KOBJ->prot = (vm_prot_t *) kalloc(KOBJ->num_pages * sizeof(vm_prot_t));
#if 9
        C_user_prot += (KOBJ->num_pages * sizeof(vm_prot_t));
#endif
        if (! KOBJ->prot) {
                panic("m_svm_init");
        }
        bzero((char *) KOBJ->prot,
              (int) (KOBJ->num_pages * sizeof(vm_prot_t)));

        KOBJ->next = MOBJ->kobj_list;
        MOBJ->kobj_list = kobj;

        /*
         * If there are multiple kernels, then we had better be
         * using MEMORY_OBJECT_COPY_NONE, at least until we get
         * trickier about changing copy strategies.
         */
        if (MOBJ->kobj_list && ((struct kobj *)MOBJ->kobj_list)->next &&
            MOBJ->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
                panic("losing big on multiple copies of temporary object");
        }

        if (MOBJ->state == MOBJ_STATE_READY) {
                assert(MOBJ->memory_object_name != IP_NULL);
                ipc_port_copy_send(MOBJ->memory_object_name);
                K_SET_READY(kobj, OBJECT_READY_TRUE, MOBJ->may_cache,
                            MOBJ->copy_strategy, USE_OLD_PAGEOUT_TRUE,
                            MOBJ->memory_object_name, XMM_REPLY_NULL);
        } else if (MOBJ->state == MOBJ_STATE_UNCALLED) {
                MOBJ->state = MOBJ_STATE_CALLED;
                M_INIT(mobj, mobj, PAGE_SIZE, internal, size);
        } else {
                assert(MOBJ->state == MOBJ_STATE_TERMINATED);
                panic("m_svm_init: terminate/lookup race");
        }

        return KERN_SUCCESS;
}

m_svm_terminate(mobj, kobj)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
{
        xmm_obj_t kobj_terminated, *kp;

#ifdef  lint
        M_TERMINATE(mobj, kobj);
#endif  /* lint */
        /*
         * Remove kobj from list and free its resources.
         * Return if there are more kobjs.
         */
        kobj_terminated = kobj;
        for (kp = &MOBJ->kobj_list; kobj = *kp; kp = &KOBJ->next) {
                if (kobj == kobj_terminated) {
                        *kp = KOBJ->next;
                        break;
                }
        }
        xmm_obj_release(kobj_terminated);

        /*
         * Release one reference to xmm object. If there are no
         * more references, then xmm_svm_destroy will be called.
         */
        xmm_object_release(MOBJ->memory_object);
        return KERN_SUCCESS;
}

void
xmm_svm_destroy(mobj)
        xmm_obj_t mobj;
{
        assert(mobj->class == &msvm_class);
        MOBJ->state = MOBJ_STATE_TERMINATED;
        (void) M_TERMINATE(mobj, mobj);
}

void
k_svm_deallocate(kobj)
        xmm_obj_t kobj;
{
        /*
         * Free kobj's resources.
         */
        if (KOBJ->num_pages > 0) {
                kfree((char *) KOBJ->prot,
                      KOBJ->num_pages * sizeof(vm_prot_t));
#if 9
                C_user_prot -= (KOBJ->num_pages * sizeof(vm_prot_t));
#endif
        }
}

void
m_svm_deallocate(mobj)
        xmm_obj_t mobj;
{
        /*
         * Free mobj's resources.
         */
        if (MOBJ->num_pages > 0) {
                kfree((char *) MOBJ->prot,
                      MOBJ->num_pages * sizeof(vm_prot_t));
                kfree((char *) MOBJ->lock,
                      MOBJ->num_pages * sizeof(vm_prot_t));
#if 9
                C_mobj_prot -= (MOBJ->num_pages * sizeof(vm_prot_t));
                C_mobj_lock -= (MOBJ->num_pages * sizeof(vm_prot_t));
#endif
        }
        if (MOBJ->memory_object_name != IP_NULL) {
                ipc_port_release_send(MOBJ->memory_object_name);
        }
}

void
m_svm_request(mobj, r)
        xmm_obj_t mobj;
        request_t r;
{
        assert(mobj->class == &msvm_class);
        if ((unsigned long) atop(r->offset) >= MOBJ->num_pages) {
                m_svm_extend(mobj, atop(r->offset) + 1);
        }
        if (m_svm_add_request(mobj, r)) {
                m_svm_process_kernel_request(mobj, r);
        }
}

m_svm_data_request(mobj, kobj, offset, length, desired_access)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
        request_t r;

#ifdef  lint
        M_DATA_REQUEST(mobj, kobj, offset, length, desired_access);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        assert(kobj->class == &ksvm_class);
        if (length != PAGE_SIZE) {
                K_DATA_ERROR(kobj, offset, length, KERN_FAILURE);
                return KERN_FAILURE;
        }
        r = (request_t) zalloc(xmm_svm_request_zone);
        r->who.kobj = kobj;
        r->is_kernel = TRUE;
        r->m_count = 0;
        r->k_count = 0;
        r->needs_data = TRUE;
        r->should_clean = FALSE;
        r->should_flush = FALSE;
        r->desired_access = desired_access;
        r->offset = offset;
        r->next_ne = 0;
        r->next_eq = 0;
        m_svm_request(mobj, r);
        return KERN_SUCCESS;
}

m_svm_data_unlock(mobj, kobj, offset, length, desired_access)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
        request_t r;

#ifdef  lint
        M_DATA_UNLOCK(mobj, kobj, offset, length, desired_access);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        assert(kobj->class == &ksvm_class);
        if (length != PAGE_SIZE) {
                K_DATA_ERROR(kobj, offset, length, KERN_FAILURE);
                return KERN_FAILURE;
        }
        r = (request_t) zalloc(xmm_svm_request_zone);
        r->who.kobj = kobj;
        r->is_kernel = TRUE;
        r->m_count = 0;
        r->k_count = 0;
        r->needs_data = FALSE;
        r->should_clean = FALSE;
        r->should_flush = FALSE;
        r->desired_access = desired_access;
        r->offset = offset;
        r->next_ne = 0;
        r->next_eq = 0;
        m_svm_request(mobj, r);
        return KERN_SUCCESS;
}

m_svm_data_write(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        request_t r;

#ifdef  lint
        M_DATA_WRITE(mobj, kobj, offset, data, length);
#endif  /* lint */
#if     USE_XMM_BUFFER
        assert(mobj->class == &msvm_class);
        assert(kobj->class == &ksvm_class);
        /* make sanity checks */
        r = m_svm_lookup_request(mobj, offset);
        if (r == REQUEST_NULL || ! r->is_kernel) {
                /*
                 * If there is no request, then this is an unsolicited
                 * pageout. We don't want to buffer this, since no one
                 * wants it.
                 *
                 * If this is not a kernel request, then it is a pager
                 * request, and thus the pager wants this page. We
                 * don't want to buffer the page in this case either.
                 */
                return M_DATA_WRITE(mobj, mobj, offset, data, length);
        } else {
                /*
                 * To avoid deadlock, pager requests have priority.
                 * Thus, if first request is a kernel, then all are.
                 * Therefore this pageout is wanted by kernels and
                 * not by the memory manager. This is the case in which
                 * we want to buffer the page.
                 */
                return M_BUFFERED_DATA_WRITE(mobj, mobj, offset, data, length);
        }
#else   /* USE_XMM_BUFFER */
        return M_DATA_WRITE(mobj, mobj, offset, data, length);
#endif  /* USE_XMM_BUFFER */
}

m_svm_do_lock_request(k, should_clean, should_flush, lock_value, r, mobj)
        xmm_obj_t k;
        boolean_t should_clean;
        boolean_t should_flush;
        vm_prot_t lock_value;
        request_t r;
        xmm_obj_t mobj;
{
        kern_return_t kr;
        xmm_reply_t reply;

        xmm_obj_reference(mobj);
        kr = xmm_reply_allocate(k, (ipc_port_t) mobj, XMM_SVM_REPLY, &reply);
        if (kr != KERN_SUCCESS) {
                panic("m_svm_do_lock_request: xmm_reply_allocate: %d\n", kr);
        }
        K_LOCK_REQUEST(k, r->offset, PAGE_SIZE, should_clean, should_flush,
                       lock_value, reply);
}

m_svm_lock_completed(reply, offset, length)
        xmm_reply_t reply;
        vm_offset_t offset;
        vm_size_t length;
{
        request_t r;
        xmm_obj_t mobj;

#ifdef  lint
        M_LOCK_COMPLETED(reply, offset, length);
#endif  /* lint */
        /* XXX should make sanity checks */
        /* XXX should store r in reply */
        assert(reply->reply_to_type == XMM_SVM_REPLY);
        mobj = (xmm_obj_t) reply->reply_to;
        xmm_reply_deallocate(reply);
        assert(mobj->class == &msvm_class);
        r = m_svm_lookup_request(mobj, offset);
        if (r == REQUEST_NULL) {
                panic("m_svm_lock_completed: missing request");
        }
        if (--r->k_count == 0 && r->m_count == 0) {
                m_svm_satisfy_request(mobj, r, DATA_NONE);
        }
        xmm_obj_release(mobj);  /* reference obtained by do_lock_request */
        return KERN_SUCCESS;
}

k_svm_data_supply(mobj, offset, data, length, lock_value, precious, reply)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
        vm_prot_t lock_value;
        boolean_t precious;
        xmm_reply_t reply;
{
        request_t r;

#ifdef  lint
        K_DATA_SUPPLY(mobj, offset, data, length, lock_value, precious, reply);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        /* make sanity checks */

        if (precious) {
                panic("k_svm_data_supply: precious");
        }
        if (reply != XMM_REPLY_NULL) {
                panic("k_svm_data_supply: reply");
        }

        /*
         * XXX what do we do if this restricts access???
         * XXX should probably do whatever lock_request does.
         */
        if (lock_value & ~MOBJ->lock[atop(offset)]) {
                printf("XXX data_supply: lock=0x%x -> 0x%x\n",
                       MOBJ->lock[atop(offset)], lock_value);
        }

        MOBJ->lock[atop(offset)] = lock_value;

        r = m_svm_lookup_request(mobj, offset);
        if (r == REQUEST_NULL) {
                printf("how strange, data_supply for nothing!\n");
                return KERN_FAILURE;
        }
        if (--r->m_count == 0 && r->k_count == 0) {
                m_svm_satisfy_request(mobj, r, data);
        } else {
                printf("how strange, data provided but still other things\n");
                return KERN_FAILURE;
        }
        return KERN_SUCCESS;
}

k_svm_data_unavailable(mobj, offset, length)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_size_t length;
{
        request_t r;

#ifdef  lint
        K_DATA_UNAVAILABLE(mobj, offset, length);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        /* make sanity checks */

        /* XXX is this absolutely correct? */
        MOBJ->lock[atop(offset)] = VM_PROT_NONE;

        r = m_svm_lookup_request(mobj, offset);
        if (r == REQUEST_NULL) {
                printf("how strange, data_unavailable for nothing!\n");
                return KERN_FAILURE;
        }
        if (--r->m_count == 0 && r->k_count == 0) {
                m_svm_satisfy_request(mobj, r, DATA_UNAVAILABLE);
        }
        return KERN_SUCCESS;
}

k_svm_lock_request(mobj, offset, length, should_clean, should_flush,
                   lock_value, reply)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_size_t length;
        boolean_t should_clean;
        boolean_t should_flush;
        vm_prot_t lock_value;
        xmm_reply_t reply;
{
        request_t r, r0;

#ifdef  lint
        K_LOCK_REQUEST(mobj, offset, length, should_clean, should_flush,
                       lock_value, reply);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        dprintf("k_svm_lock_request!\n");

        if (length != PAGE_SIZE) {
                if (length > PAGE_SIZE) {
                        panic("k_svm_lock_request: %d > PAGE_SIZE\n", length);
                }
                length = PAGE_SIZE;
        }
        if ((unsigned long) atop(offset) >= MOBJ->num_pages) {
                m_svm_extend(mobj, atop(offset) + 1);
        }

        r0 = m_svm_lookup_request(mobj, offset);

        /*
         * If we are not increasing lock value, flushing, or cleaning,
         * then we simply set the lock value, without creating a request.
         * However, we do need to see whether we can satisfy a kernel request.
         */
        if (! (lock_value & ~MOBJ->lock[atop(offset)])
            && ! should_clean && ! should_flush) {
                MOBJ->lock[atop(offset)] = lock_value;
                if (r0
                    && r0->is_kernel
                    && !(lock_value & r0->desired_access)
                    && r0->m_count > 0 && --r0->m_count == 0
                    && r0->k_count == 0) {
                        m_svm_satisfy_kernel_request(mobj, r0, DATA_NONE);
                }
                return KERN_SUCCESS;
        }

        /*
         * We need to submit a request. Create the request.
         */
        dprintf("** lock_request: submitting request\n");
        r = (request_t) zalloc(xmm_svm_request_zone);
        r->who.reply = reply;
        r->is_kernel = FALSE;
        r->m_count = 0;
        r->k_count = 0;
        r->needs_data = FALSE;
        r->should_clean = should_clean;
        r->should_flush = should_flush;
        r->lock_value = lock_value;
        r->offset = offset;
        r->next_ne = 0;
        r->next_eq = 0;

        /*
         * If there are no requests, then add new request and process it.
         */
        if (! r0) {
                dprintf("- no reqs\n");
                (void) m_svm_add_request(mobj, r); /* will be true */
                (void) m_svm_process_pager_request(mobj, r);
                return KERN_SUCCESS;
        }

        /*
         * If first request is pager request, then place new request
         * after all pager requests, but before any kernel requests.
         */
        if (! r0->is_kernel) {
                dprintf("- only pager reqs\n");
                while (r0->next_eq && ! r0->next_eq->is_kernel) {
                        r0 = r0->next_eq;
                }
                r->next_eq = r0->next_eq;
                r0->next_eq = r;
                return KERN_SUCCESS;
        }

        /*
         * First request is a kernel request.
         * To avoid deadlock, pager requests have priority.
         * Thus, if first request is a kernel, then all are.
         * In this case, we place new request at the top
         * (before all kernel requests) and process it immediately.
         *
         * XXXO
         * This is slightly pessimal because we just ignore any
         * request that the kernel request made to the other kernels.
         */
        if (r0->is_kernel) {
                request_t *rp;
                for (rp = &MOBJ->request; r0 = *rp; rp = &r0->next_ne) {
                        if (r0->offset == offset) {
                                break;
                        }
                }
                if (! r0) {
                        printf("oops, oh my\n");
                        return KERN_FAILURE;
                }
                *rp = r;
                r->next_ne = r0->next_ne;
                r->next_eq = r0;
                if (r0->m_count) {
                        printf("This could get confusing\n");
                }
                r->m_count = r0->m_count;
                r->k_count = r0->k_count;
                r0->m_count = 0;
                r0->k_count = 0;        /* XXXO */
                (void) m_svm_process_pager_request(mobj, r);
                return KERN_SUCCESS;
        }
        return KERN_SUCCESS;
}

k_svm_data_error(mobj, offset, length, error_value)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_size_t length;
        kern_return_t error_value;
{
        request_t r;

#ifdef  lint
        K_DATA_ERROR(mobj, offset, length, error_value);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        /* make sanity checks */

        /* XXX certainly questionable! */
        MOBJ->lock[atop(offset)] = VM_PROT_NONE;

        r = m_svm_lookup_request(mobj, offset);
        if (r == REQUEST_NULL) {
                printf("how strange, data_error for nothing!\n");
                return KERN_FAILURE;
        }
        if (--r->m_count == 0 && r->k_count == 0) {
                m_svm_satisfy_request(mobj, r, DATA_ERROR);
        }
        /* XXX should keep and return error_value */
        printf("k_svm_data_error: Gack(%d)!\n", error_value);
        return KERN_SUCCESS;
}

k_svm_set_ready(mobj, object_ready, may_cache, copy_strategy, use_old_pageout,
                memory_object_name, reply)
        xmm_obj_t mobj;
        boolean_t object_ready;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
        boolean_t use_old_pageout;
        ipc_port_t memory_object_name;
        xmm_reply_t reply;
{
        xmm_obj_t kobj;

#ifdef  lint
        K_SET_READY(mobj, object_ready, may_cache, copy_strategy,
                    use_old_pageout, memory_object_name, reply);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        MOBJ->may_cache = may_cache;

        /*
         * Compute our copy strategy based on that of underlying pager.
         *
         * XXX
         * Right now, we always use COPY_NONE, except if underlying pager
         * specifies COPY_TEMPORARY, in which case we pass COPY_TEMPORARY
         * up unchanged.
         * What this means is that we don't have any intelligent way
         * of dealing with sharing, but that if it's a temporary object
         * (either a vm internal object, created via memory_object_create,
         * or an xmm internal object, created via norma_copy_create),
         * then we don't expect any sharing, so we can use a lazy copy.
         *
         * THIS WILL BREAK IN ITS CURRENT FORM WHEN WE ENABLE VM_INHERIT_SHARE
         */
        if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
                MOBJ->copy_strategy = MEMORY_OBJECT_COPY_TEMPORARY;
        } else {
                MOBJ->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        }

        if (MOBJ->memory_object_name == IP_NULL) {
                MOBJ->memory_object_name = memory_object_name;
        } else {
                assert(MOBJ->memory_object_name == memory_object_name);
                ipc_port_release_send(memory_object_name);
        }
        if (object_ready) {
                MOBJ->state = MOBJ_STATE_READY;
        } else if (MOBJ->state == MOBJ_STATE_READY) {
                /* XXX What should we do here? */
                printf("k_svm_set_ready: ready -> not ready ?\n");
        }
        if (! use_old_pageout) {
                panic("k_svm_set_ready: use_old_pageout=FALSE!");
        }
        if (reply != XMM_REPLY_NULL) {
                panic("k_svm_set_ready: reply!\n");
        }

        /*
         * If there are multiple kernels, then we had better be
         * using MEMORY_OBJECT_COPY_NONE, at least until we get
         * trickier about changing copy strategies.
         */
        if (MOBJ->kobj_list && ((struct kobj *)MOBJ->kobj_list)->next &&
            MOBJ->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
                panic("losing big on multiple copies of temporary object");
        }

        /*
         * Let all kernels know we're ready.
         */
        for (kobj = MOBJ->kobj_list; kobj; kobj = KOBJ->next) {
                assert(MOBJ->memory_object_name != IP_NULL);
                ipc_port_copy_send(MOBJ->memory_object_name);
                K_SET_READY(kobj, object_ready, may_cache, MOBJ->copy_strategy,
                            USE_OLD_PAGEOUT_TRUE, MOBJ->memory_object_name,
                            XMM_REPLY_NULL);
        }
        return KERN_SUCCESS;
}

k_svm_destroy(mobj, reason)
        xmm_obj_t mobj;
        kern_return_t reason;
{
#ifdef  lint
        K_DESTROY(mobj, reason);
#endif  /* lint */
        assert(mobj->class == &msvm_class);
        printf("k_svm_destroy: Gack!\n");
}

/*
 * Place request at end of appropriate queue.
 * Return TRUE if first request in queue for this page.
 */
boolean_t
m_svm_add_request(mobj, r0)
        xmm_obj_t mobj;
        request_t r0;
{
        request_t r, *rp;

        assert(mobj->class == &msvm_class);
        dprintf("m_svm_add_request(0x%x, 0x%x)\n", mobj, r0);
        for (rp = &MOBJ->request; r = *rp; rp = &r->next_ne) {
                dprintf("m_svm_add_request: 0x%x 0x%x\n", r, r0);
                if (r->offset == r0->offset) {
                        for (; r->next_eq; r = r->next_eq) {
                                continue;
                        }
                        r->next_eq = r0;
                        return FALSE;
                }
        }
        r0->next_ne = MOBJ->request;
        MOBJ->request = r0;
        return TRUE;
}

/*
 * Look for first request for given offset.
 * If we find such a request, move it to front of list
 * since we expect to remove it soon.
 */
request_t
m_svm_lookup_request(mobj, offset)
        xmm_obj_t mobj;
        vm_offset_t offset;
{
        request_t r, *rp;

        assert(mobj->class == &msvm_class);
        for (rp = &MOBJ->request; r = *rp; rp = &r->next_ne) {
                if (r->offset == offset) {
                        *rp = r->next_ne;
                        r->next_ne = MOBJ->request;
                        MOBJ->request = r;
                        return r;
                }
        }
        return REQUEST_NULL;
}

/*
 * Remove first request for given offset.
 * Return next request for same offset, if any.
 */
request_t
m_svm_remove_request(mobj, offset)
        xmm_obj_t mobj;
        vm_offset_t offset;
{
        request_t r, *rp;

        assert(mobj->class == &msvm_class);
        for (rp = &MOBJ->request; r = *rp; rp = &r->next_ne) {
                if (r->offset == offset) {
                        if (r->next_eq) {
                                r = r->next_eq;
                                r->next_ne = (*rp)->next_ne;
                                *rp = r;
                                return r;
                        } else {
                                *rp = r->next_ne;
                                return REQUEST_NULL;
                        }
                }
        }
        printf("m_svm_remove_request: request not found!\n");
        return REQUEST_NULL;
}
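
/*
 * Illustration of the request list structure manipulated by the three
 * routines above: MOBJ->request chains one request per distinct page
 * through next_ne ("next, not-equal offset"); further requests for the
 * same page queue behind the first through next_eq ("next, equal offset"):
 *
 *      MOBJ->request --> [page 0] --ne--> [page 3] --ne--> [page 7]
 *                           |eq              |eq
 *                        [page 0']        [page 3']
 */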

/*
 * All the real work takes place in m_svm_process_request and
 * m_svm_satisfy_request.
 *
 * m_svm_process_request takes a request for a page that does not already have
 * outstanding requests and generates the appropriate K/M_ requests.
 * If, after generating all appropriate K/M_ requests, there are no outstanding
 * K/M_ requests (either because no K/M_ requests were required, or because
 * they were all satisfied by the time we check), we call
 * m_svm_satisfy_request.
 *
 * m_svm_satisfy_request takes a request for a page that has had its last
 * outstanding K/M_ request satisfied, and sends the appropriate K/M_ reply
 * to the entity (kernel or memory manager) that generated the request. If more
 * requests follow the request being satisfied, m_svm_satisfy_request calls
 * m_svm_process_request on the first such request.
 */
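
/*
 * Illustrative walkthrough of a common case: suppose kernel A has a page
 * mapped read-only and kernel B takes a write fault on it.
 * m_svm_data_request queues a request whose desired_access includes
 * VM_PROT_WRITE; m_svm_process_kernel_request sees the conflicting
 * reader, bumps k_count, and issues a flush-and-lock K_LOCK_REQUEST to
 * kernel A. When A's m_svm_lock_completed arrives, k_count drops to
 * zero and m_svm_satisfy_kernel_request runs; since the page is still
 * locked by the pager (and data is needed), it bumps m_count and sends
 * M_DATA_REQUEST to the pager, and the eventual k_svm_data_supply
 * finally supplies the page to kernel B.
 */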

/*
 * This routine does not worry about lock[page]; m_svm_satisfy_request does.
 */
void
m_svm_process_kernel_request(mobj, r)
        xmm_obj_t mobj;
        request_t r;
{
        int page;
        xmm_obj_t kobj, k;

        assert(mobj->class == &msvm_class);
        page = atop(r->offset);
        kobj = r->who.kobj;

        /*
         * If requesting kernel wants to write, we must flush and lock
         * all kernels (either readers or a single writer).
         */
        if (r->desired_access & VM_PROT_WRITE) {
                boolean_t writing = !! (MOBJ->prot[page] & VM_PROT_WRITE);
                MOBJ->prot[page] = VM_PROT_NONE;
                r->k_count++;
                for (k = MOBJ->kobj_list; k; k = K->next) {
                        if (k == kobj || K->prot[page] == VM_PROT_NONE) {
                                continue;
                        }
                        r->k_count++;
                        K->prot[page] = VM_PROT_NONE;
                        m_svm_do_lock_request(k, writing, TRUE, VM_PROT_ALL,
                                              r, mobj);
                        if (writing) {
                                break;
                        }
                }
                if (--r->k_count == 0 && r->m_count == 0) {
                        m_svm_satisfy_kernel_request(mobj, r, DATA_NONE);
                }
                return;
        }

        /*
         * If requesting kernel wants to read, but the page is being written,
         * then we must clean and lock the writer.
         */
        if (r->desired_access && (MOBJ->prot[page] & VM_PROT_WRITE)) {
                if (KOBJ->prot[page] & VM_PROT_WRITE) {
                        /*
                         * What could the writer be doing asking us for read?
                         *
                         * This can happen if page was cleaned and flushed,
                         * or (more commonly?) cleaned and then paged out.
                         *
                         * Should we give this kernel read (more concurrency)
                         * or write (on the assumption that he will want
                         * to write again)?
                         *
                         * For now, we just give him read.
                         * We have to correct our notion of how this page is
                         * used. Note that there is no problem giving
                         * him either read or write, since there is nobody
                         * else to evict.
                         */
                        KOBJ->prot[page] = r->desired_access;
                        MOBJ->prot[page] = r->desired_access;
                        m_svm_satisfy_kernel_request(mobj, r, DATA_NONE);
                        return;
                }
                for (k = MOBJ->kobj_list; k; k = K->next) {
                        if (K->prot[page] & VM_PROT_WRITE) {
                                break;
                        }
                }
                if (k == XMM_OBJ_NULL) {
                        printf("x lost writer!\n");
                        return;
                }
                MOBJ->prot[page] = VM_PROT_READ;
                K->prot[page] = VM_PROT_READ;
                r->k_count++;
                m_svm_do_lock_request(k, TRUE, FALSE, VM_PROT_WRITE, r, mobj);
                return;
        }

        /*
         * No current kernel use conflicts with requesting kernel's
         * desired use. Call m_svm_satisfy_kernel_request, which
         * will handle any requests that need to be made of the pager.
         */
        m_svm_satisfy_kernel_request(mobj, r, DATA_NONE);
}

void
m_svm_process_pager_request(mobj, r)
        xmm_obj_t mobj;
        request_t r;
{
        int page;
        xmm_obj_t k;

        assert(mobj->class == &msvm_class);
        page = atop(r->offset);

        /*
         * Locking against non-write implies locking all access.
         * Is this a bug, or universal truth?
         * Beware: code below and elsewhere depends on this mapping.
         */
        if (r->lock_value & ~VM_PROT_WRITE) {
                r->lock_value = VM_PROT_ALL;
        }

        /*
         * XXX we can't yet represent
         *      (lock=write but dirty)
         * or
         *      (lock=all but resident)
         *
         * Thus we force lock=write into clean,
         * and lock=all into flush.
         */
        if (r->lock_value == VM_PROT_WRITE) {
                r->should_clean = TRUE;
        } else if (r->lock_value) {
                r->should_clean = TRUE;
                r->should_flush = TRUE;
        }

        /*
         * If we need to flush, or lock all access, then we must talk
         * to all kernels.
         */
        if (r->should_flush || r->lock_value == VM_PROT_ALL) {
                r->k_count++;
                MOBJ->prot[page] &= ~r->lock_value;
                for (k = MOBJ->kobj_list; k; k = K->next) {
                        if (K->prot[page] == VM_PROT_NONE) {
                                continue;
                        }
                        r->k_count++;
                        K->prot[page] &= ~r->lock_value;
                        m_svm_do_lock_request(k, r->should_clean,
                                              r->should_flush,
                                              r->lock_value, r, mobj);
                }
                if (--r->k_count == 0 && r->m_count == 0) {
                        m_svm_satisfy_request(mobj, r, DATA_NONE);
                }
                return;
        }

        /*
         * If we need to clean, or lock write access, and there is in fact
         * a writer, then we must talk to that writer.
         */
        if ((r->should_clean || r->lock_value == VM_PROT_WRITE)
            && (MOBJ->prot[page] & VM_PROT_WRITE)) {
                MOBJ->prot[page] &= ~r->lock_value;
                for (k = MOBJ->kobj_list; k; k = K->next) {
                        if (K->prot[page] & VM_PROT_WRITE) {
                                break;
                        }
                }
                if (k == XMM_OBJ_NULL) {
                        printf("y lost writer!\n");
                        return;
                }
                K->prot[page] &= ~r->lock_value;
                r->k_count++;
                m_svm_do_lock_request(k, r->should_clean, FALSE, r->lock_value,
                                      r, mobj);
                return;
        }

        /*
         * We didn't need to flush, clean, or lock.
         */
        m_svm_satisfy_pager_request(mobj, r);
}

void
m_svm_process_request(mobj, r)
        xmm_obj_t mobj;
        request_t r;
{
        assert(mobj->class == &msvm_class);
        if (r->is_kernel) {
                m_svm_process_kernel_request(mobj, r);
        } else {
                m_svm_process_pager_request(mobj, r);
        }
}

void
m_svm_satisfy_kernel_request(mobj, r, data)
        xmm_obj_t mobj;
        request_t r;
        vm_offset_t data;
{
        xmm_obj_t kobj;
        request_t r_next;

        kobj = r->who.kobj;
        assert(mobj->class == &msvm_class);
        assert(r->is_kernel);
        assert(r->k_count == 0);
        assert(r->m_count == 0);

        /*
         * If we need an unlock or data from the pager, make the request now.
         */
        if ((MOBJ->lock[atop(r->offset)] & r->desired_access)
            || (r->needs_data && data == DATA_NONE)) {
                if (data) {
                        M_DATA_WRITE(mobj, mobj, r->offset, data,
                                     PAGE_SIZE);
                }
                r->m_count++;
                if (r->needs_data) {
                        M_DATA_REQUEST(mobj, mobj, r->offset, PAGE_SIZE,
                                       r->desired_access);
                } else {
                        M_DATA_UNLOCK(mobj, mobj, r->offset, PAGE_SIZE,
                                      r->desired_access);
                }
                return;
        }

        /*
         * We have everything we need. Satisfy the kernel request.
         */
        if (! r->needs_data) {
                K_LOCK_REQUEST(r->who.kobj, r->offset, PAGE_SIZE, FALSE,
                               FALSE, r->desired_access ^ VM_PROT_ALL,
                               XMM_REPLY_NULL);
        } else if (data == DATA_UNAVAILABLE) {
                K_DATA_UNAVAILABLE(r->who.kobj, r->offset, PAGE_SIZE);
                r->desired_access = VM_PROT_ALL;        /* XXX */
        } else if (data == DATA_ERROR) {
                K_DATA_ERROR(r->who.kobj, r->offset, PAGE_SIZE,
                             KERN_FAILURE);
                /* XXX start killing object? */
        } else {
                K_DATA_SUPPLY(r->who.kobj, r->offset, data, PAGE_SIZE,
                              r->desired_access ^ VM_PROT_ALL, FALSE,
                              XMM_REPLY_NULL);
        }

        /*
         * Update KOBJ->prot[] and MOBJ->prot[] values.
         */
        MOBJ->prot[atop(r->offset)] = r->desired_access;
        KOBJ->prot[atop(r->offset)] = r->desired_access;

        /*
         * Remove and free request.
         */
        r_next = m_svm_remove_request(mobj, r->offset);
        zfree(xmm_svm_request_zone, (vm_offset_t) r);

        /*
         * If there is another request, process it now.
         */
        if (r_next) {
                m_svm_process_request(mobj, r_next);
        }
}

void
m_svm_satisfy_pager_request(mobj, r)
        xmm_obj_t mobj;
        request_t r;
{
        request_t r_next;

        assert(mobj->class == &msvm_class);
        assert(! r->is_kernel);
        assert(r->k_count == 0);
        assert(r->m_count == 0);

#if     USE_XMM_BUFFER
        /*
         * Flush or clean any buffered data if necessary.
         */
        if (r->should_flush || r->should_clean) {
                M_UNBUFFER_DATA(mobj, r->offset, PAGE_SIZE,
                                r->should_clean, r->should_flush);
        }
#endif  /* USE_XMM_BUFFER */

        /*
         * We have everything we need. Satisfy the pager request.
         */
        if (r->who.reply != XMM_REPLY_NULL) {
                M_LOCK_COMPLETED(r->who.reply, r->offset, PAGE_SIZE);
        }

        /*
         * Update MOBJ->lock[] value.
         */
        MOBJ->lock[atop(r->offset)] = r->lock_value;

        /*
         * Remove and free request.
         */
        r_next = m_svm_remove_request(mobj, r->offset);
        zfree(xmm_svm_request_zone, (vm_offset_t) r);

        /*
         * If there is another request, process it now.
         */
        if (r_next) {
                m_svm_process_request(mobj, r_next);
        }
}

void
m_svm_satisfy_request(mobj, r, data)
        xmm_obj_t mobj;
        request_t r;
        vm_offset_t data;
{
        assert(mobj->class == &msvm_class);
        if (r->is_kernel) {
                m_svm_satisfy_kernel_request(mobj, r, data);
        } else {
                m_svm_satisfy_pager_request(mobj, r);
        }
}

xmm_svm_init()
{
        xmm_svm_request_zone = zinit(sizeof(struct request), 512*1024,
                                     sizeof(struct request), FALSE,
                                     "xmm.svm.request");
}
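
/*
 * Commentary (an interpretive note, not from the original author): the
 * zinit arguments appear to follow the usual Mach convention of element
 * size, maximum zone size in bytes (here 512K), allocation increment,
 * pageable flag (FALSE, so the zone is wired), and a zone name for
 * debugging tools.
 */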

#include <sys/varargs.h>

int xmm_svm_debug = 0;

/* VARARGS */
xmm_svm_dprintf(fmt, va_alist)
        char *fmt;
        va_dcl
{
        va_list listp;

        if (xmm_svm_debug) {
                va_start(listp);
                printf(fmt, &listp);
                va_end(listp);
        }
}
