1 /*
2 * Mach Operating System
3 * Copyright (c) 1992 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie the
24 * rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: xmm_object.c,v $
29 * Revision 2.2 92/03/10 16:29:28 jsb
30 * Merged in norma branch changes as of NORMA_MK7.
31 * [92/03/09 12:51:37 jsb]
32 *
33 * Revision 2.1.2.1 92/02/21 11:27:52 jsb
34 * Use xmm_object_destroy in xmm_object_notify to release mobj and
35 * deallocate xmm object port. Added hack to get rid of send-once
36 * right to xmm object port before we deallocate the port, since
37 * ipc_port_release_sonce won't do so after we deallocate it.
38 * [92/02/20 14:00:36 jsb]
39 *
40 * Fixed reference counting on xmm objs. A reference is now held by
41 * xmm object, which is copied along with send right to xmm object.
42 * [92/02/18 17:15:33 jsb]
43 *
44 * Lots of changes. First reasonably working version.
45 * [92/02/16 15:58:03 jsb]
46 *
47 * Added missing line to xmm_memory_manager_export.
48 * [92/02/12 05:58:07 jsb]
49 *
50 * Added xmm_object_allocate routine to replace replicated xmm object
51 * creation and initialization logic.
52 * Added xmm_object_by_memory_object_release which disassociates
53 * xmm object from memory object, possibly resulting in the deallocation
54 * of mobj associated with xmm object (via no-senders).
55 * Moved all responsibility for tearing down stack in case of xmm
56 * object creation race to no-senders notification handler.
57 * [92/02/11 18:38:36 jsb]
58 *
59 * Updated explanatory text. Fixed send right management. Added
60 * xmm_memory_manager_export routine for xmm-internal memory managers.
61 * [92/02/11 11:24:42 jsb]
62 *
63 * Added xmm_object_notify.
64 * [92/02/10 17:27:44 jsb]
65 *
66 * First checkin.
67 * [92/02/10 17:05:02 jsb]
68 *
69 */
70 /*
71 * File: norma/xmm_object.c
72 * Author: Joseph S. Barrera III
73 * Date: 1991
74 *
75 * Routines to manage xmm object to memory object association.
76 */
77
78 #include <norma/xmm_obj.h>
79 #include <norma/xmm_server_rename.h>
80 #include <ipc/ipc_space.h>
81 #include <ipc/ipc_port.h>
82 #include <vm/memory_object.h>
83 #include <vm/vm_fault.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <mach/notify.h>
91
92 extern void xmm_svm_destroy();
93
94 /*
95 * The structure here is:
96 *
97 * memory object -> xmm object [ send right ]
98 * xmm object -> top of mobj stack [ xmm obj ref ]
99 * bottom of mobj stack -> memory object [ send right ]
100 *
101 * The xmm object and mobj stack are colocated. They are originally created
102 * on the same node as the memory object, so that we can atomically set
103 * memory_object->ip_norma_xmm_object on principal port for memory object,
104 * which is the central synchronizing point for creating and finding an
105 * mobj stack for a memory object.
106 *
 * After the stack is created, the memory object may be migrated away from
108 * the stack. The port migration mechanism is responsible for maintaining
109 * the association between the memory object port and the xmm object port.
110 * (In the current implementation, this means moving the send right to
111 * xmm object and setting it as ip_norma_xmm_object in the new principal
112 * for memory object.)
113 *
114 * This doesn't seem right... of course the real basic problem is
115 * we are designing around a couple unclean things:
116 *
117 * 1. pointers from ports to objects
118 * 2. transparent port interposition
119 *
120 * So I guess it's natural that if an object moves, and we've associated
121 * data with that object, and that object doesn't know about it, then
122 * we have to migrate that data ourselves. I guess a netmsgserver could
123 * export a kobject (port data) interface, in which case it would know
124 * to migrate the data when the port was migrated.
125 *
126 * Right now the policy is to create the layer at the current home of the
127 * memory object. We could create it at the home of the first mapper.
128 * This might make sense if we expect to often not need to talk to the
129 * pager from the svm layer, for example in shadowing cases.
130 * We might even want to migrate the layer. There's a lot of flexibility
131 * here now that memory object has a port pointing to the svm layer.
132 *
133 * We could get rid of ip_norma_xmm_object and replace it with a hash table
134 * (and a set of routines to manipulate it). The advantage would be the
135 * space savings of eliminating the field, which for most ports will be
 * unused. Note that port migration must still migrate the xmm object association.
137 */
138
139 xmm_object_set(memory_object, xmm_object, make_copy)
140 ipc_port_t memory_object;
141 ipc_port_t xmm_object;
142 boolean_t make_copy;
143 {
144 assert(! IP_NORMA_IS_PROXY(xmm_object));
145 memory_object->ip_norma_xmm_object = ipc_port_make_send(xmm_object);
146 if (make_copy) {
147 assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
148 xmm_obj_reference((xmm_obj_t) xmm_object->ip_kobject);
149 memory_object->ip_norma_xmm_object_refs = 1;
150 ipc_port_copy_send(xmm_object);
151 } else {
152 memory_object->ip_norma_xmm_object_refs = 0;
153 }
154 }
155
156 ipc_port_t
157 xmm_object_copy(memory_object)
158 ipc_port_t memory_object;
159 {
160 register ipc_port_t xmm_object;
161
162 assert(! IP_NORMA_IS_PROXY(memory_object));
163 xmm_object = memory_object->ip_norma_xmm_object;
164 if (xmm_object == IP_NULL) {
165 return IP_NULL;
166 }
167 assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
168 xmm_obj_reference((xmm_obj_t) xmm_object->ip_kobject);
169 memory_object->ip_norma_xmm_object_refs++;
170 return ipc_port_copy_send(xmm_object);
171 }
172
173 void
174 xmm_object_release_local(memory_object)
175 ipc_port_t memory_object;
176 {
177 assert(! IP_NORMA_IS_PROXY(memory_object));
178 if (--memory_object->ip_norma_xmm_object_refs == 0) {
179 /*
180 * We use no-senders because it's snazzier, but we could
181 * use a call that did a move_send instead. The receiver
182 * would then deallocate the send and the no-senders
183 * notification would be done locally (if at all).
184 * Using no-senders might help deal with node failure
185 */
186 ipc_port_release_send(memory_object->ip_norma_xmm_object);
187 memory_object->ip_norma_xmm_object = IP_NULL;
188 }
189 }
190
191 /*
192 * Only called internally.
193 * Allocate an xmm_object port with a no-senders notification request.
194 * The xmm_object takes the mobj reference.
195 */
196 ipc_port_t
197 xmm_object_allocate(mobj)
198 xmm_obj_t mobj;
199 {
200 ipc_port_t xmm_object;
201 ipc_port_t old_nsrequest;
202
203 /*
204 * Create an xmm object port.
205 */
206 xmm_object = ipc_port_alloc_kernel();
207 if (xmm_object == IP_NULL) {
208 return IP_NULL;
209 }
210
211 /*
212 * Associate the xmm obj with the xmm object port.
213 * We keep the xmm obj reference returned by xmm_svm_create.
214 */
215 ipc_kobject_set(xmm_object, (ipc_kobject_t) mobj, IKOT_XMM_OBJECT);
216
217 /*
218 * Request a no-senders notification.
219 */
220 ipc_port_nsrequest(xmm_object, 1, ipc_port_make_sonce(xmm_object),
221 &old_nsrequest);
222 assert(old_nsrequest == IP_NULL);
223
224 /*
225 * Return the port.
226 */
227 return xmm_object;
228 }
229
230 /*
231 * Called when we lose a race to associate a newly created xmm object
232 * with a memory object. Also called by xmm_object_notify.
233 */
xmm_object_destroy(xmm_object, mobj)
	ipc_port_t xmm_object;
	xmm_obj_t mobj;
{
#if 666
	/*
	 * XXX
	 * A temporary fix. ipc_port_release_sonce won't decr sorights
	 * on a dead port, and ipc_kobject.c calls ipc_port_release_sonce
	 * after we've killed it, and norma_port_remove won't happen
	 * until send-once rights drop to 0. The correct fix is probably
	 * to change ipc_port_release_sonce.
	 *
	 * Note: this decrement must happen BEFORE the port is
	 * deallocated below, while the port is still alive.
	 */
	assert(xmm_object->ip_sorights > 0);
	xmm_object->ip_sorights--;
#endif

	/*
	 * Destroy xmm object port (and its no-senders notification request).
	 */
	ipc_port_dealloc_kernel(xmm_object);

	/*
	 * Lose reference to mobj, and explicitly destroy it.
	 * (The release alone is not sufficient; xmm_svm_destroy tears
	 * down the svm stack itself.)
	 */
	xmm_obj_release(mobj);
	xmm_svm_destroy(mobj);
}
262
263 /*
264 * Handle notifications. We only care about no-senders notifications.
265 */
266 boolean_t
267 xmm_object_notify(msg)
268 mach_msg_header_t *msg;
269 {
270 ipc_port_t xmm_object;
271 xmm_obj_t mobj;
272
273 /*
274 * Only process no-senders notifications.
275 */
276 if (msg->msgh_id != MACH_NOTIFY_NO_SENDERS) {
277 printf("xmm_object_notify: strange notification %d\n",
278 msg->msgh_id);
279 return FALSE;
280 }
281
282 /*
283 * Extract xmm_object port from notification message.
284 */
285 xmm_object = (ipc_port_t) msg->msgh_remote_port;
286
287 /*
288 * Get and disassociate mobj from xmm object port.
289 */
290 assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
291 mobj = (xmm_obj_t) xmm_object->ip_kobject;
292 ipc_kobject_set(xmm_object, IKO_NULL, IKOT_NONE);
293
294 /*
295 * Destroy xmm object port and mobj.
296 */
297 xmm_object_destroy(xmm_object, mobj);
298 return TRUE;
299 }
300
301 /*
302 * Called with memory_object locked. Unlocks memory_object.
303 */
304 ipc_port_t
305 xmm_object_by_memory_object_remote(memory_object)
306 ipc_port_t memory_object;
307 {
308 unsigned long node;
309 kern_return_t kr;
310 ipc_port_t xmm_object;
311
312 node = ipc_port_node(memory_object);
313 assert(node != node_self());
314 ip_unlock(memory_object);
315 kr = r_norma_xmm_object_by_memory_object(remote_host_priv(node),
316 memory_object,
317 &xmm_object);
318 assert(kr == KERN_SUCCESS);
319 return xmm_object;
320 }
321
322 /*
323 * Return send right for xmm object corresponding to memory object.
324 * This is to be consumed when using xmm object to set up init,
325 * either via move_send dest in proxy_init, or explicit deallocation
326 * in local case.
327 * Also returns one xmm_object ref, to be given to svm layer and
328 * released there upon termination via xmm_object_release.
329 * Also returns one xmm obj ref, to be consumed by xmm_obj_allocate
330 * in either _proxy_init or xmm_memory_object_init.
331 *
332 * Create xmm object if necessary.
333 * Memory object holds a send right to xmm object as well, which is released
334 * when xmm object refs drop to 0. No-senders then triggers
335 * svm deallocation.
336 */
337 ipc_port_t
338 xmm_object_by_memory_object(memory_object)
339 ipc_port_t memory_object;
340 {
341 ipc_port_t xmm_object, old_xmm_object;
342 xmm_obj_t mobj;
343 kern_return_t kr;
344
345 /*
346 * We always create the svm stack at the current location of the
347 * memory object. We may have to chase it down if it's migrating.
348 *
349 * The memory_object principal node is the one true source
350 * of knowledge about whether an svm stack exists.
351 */
352 ip_lock(memory_object);
353 if (IP_NORMA_IS_PROXY(memory_object)) {
354 /* the following call inherits the lock */
355 return xmm_object_by_memory_object_remote(memory_object);
356 }
357
358 /*
359 * If there is already an xmm_object associated with this
360 * memory_object, return it, after taking a send-right reference
361 * which will be given (moved, if necessary) to the caller.
362 */
363 xmm_object = xmm_object_copy(memory_object);
364 if (xmm_object != IP_NULL) {
365 ip_unlock(memory_object);
366 return xmm_object;
367 }
368
369 /*
370 * Check kobject type, to foil attempts to map in inappropriate
371 * kernel objects (like task ports).
372 */
373 if (ip_kotype(memory_object) != IKOT_NONE &&
374 ip_kotype(memory_object) != IKOT_PAGER) {
375 ip_unlock(memory_object);
376 return IP_NULL;
377 }
378
379 /*
380 * No xmm object is currently associcated with memory object.
381 * Unlock memory object port, and create an xmm obj stack.
382 * and a corresponding xmm obj stack.
383 *
384 * XXX
385 * Should deallocate things if this call fails part-way.
386 */
387 ip_unlock(memory_object);
388 kr = xmm_user_create(memory_object, &mobj);
389 if (kr != KERN_SUCCESS) {
390 panic("xmm_mo_create: xmm_user_create: %x\n", kr);
391 return IP_NULL;
392 }
393 kr = xmm_split_create(mobj, &mobj);
394 if (kr != KERN_SUCCESS) {
395 panic("xmm_mo_create: xmm_split_create: %x\n", kr);
396 return IP_NULL;
397 }
398 kr = xmm_svm_create(mobj, memory_object, &mobj);
399 if (kr != KERN_SUCCESS) {
400 panic("xmm_mo_create: xmm_svm_create: %x\n", kr);
401 return IP_NULL;
402 }
403
404 /*
405 * Create an xmm object and associate it with stack.
406 * It will have one send right and a no-senders notification request.
407 */
408 xmm_object = xmm_object_allocate(mobj);
409 if (xmm_object == IP_NULL) {
410 panic("xmm_mo_create: xmm_object_allocate: %x\n", kr);
411 return IP_NULL;
412 }
413
414 /*
415 * Now that we have a stack to associate with the memory object,
416 * make sure we still want it. If we don't, then just release
417 * the send right, and the no-senders notification handler
418 * will take care of deallocation.
419 *
420 * First, make sure that the memory object has not migrated.
421 */
422 ip_lock(memory_object);
423 if (IP_NORMA_IS_PROXY(memory_object)) {
424 xmm_object_destroy(xmm_object, mobj);
425 /* the following call inherits the lock */
426 return xmm_object_by_memory_object_remote(memory_object);
427 }
428
429 /*
430 * If we lost the race to create the stack, discard ours
431 * and use the one already created. Otherwise, associate
432 * our xmm object and stack with the memory object,
433 * by giving the memory object the send right to the xmm object.
434 */
435 old_xmm_object = xmm_object_copy(memory_object);
436 if (old_xmm_object != IP_NULL) {
437 xmm_object_destroy(xmm_object, mobj);
438 xmm_object = old_xmm_object;
439 } else {
440 xmm_object_set(memory_object, xmm_object, TRUE);
441 }
442
443 /*
444 * Unlock memory object and return the xmm object send right.
445 */
446 ip_unlock(memory_object);
447 return xmm_object;
448 }
449
450 /*
451 * Remote, protected cover routine for xmm_object_by_memory_object.
452 * Requires host_priv.
453 */
454 kern_return_t
455 norma_xmm_object_by_memory_object(host, memory_object, xmm_object)
456 host_t host;
457 ipc_port_t memory_object;
458 ipc_port_t *xmm_object;
459 {
460 /*
461 * Check host port validity.
462 */
463 if (host == HOST_NULL) {
464 return KERN_INVALID_ARGUMENT;
465 }
466
467 /*
468 * Obtain xmm_object, perhaps recursively.
469 */
470 *xmm_object = xmm_object_by_memory_object(memory_object);
471
472 /*
473 * Discard send right to memory_object given to us by our caller.
474 */
475 ipc_port_release_send(memory_object);
476 return KERN_SUCCESS;
477 }
478
479 /*
480 * Called with memory_object locked. Unlocks memory_object.
481 */
482 void
483 xmm_object_release_remote(memory_object)
484 ipc_port_t memory_object;
485 {
486 unsigned long node;
487
488 node = ipc_port_node(memory_object);
489 assert(node != node_self());
490 ip_unlock(memory_object);
491 r_norma_xmm_object_release(remote_host_priv(node), memory_object);
492 }
493
494 /*
495 * If there are no real references to xmm object, then break its
496 * association with memory object.
497 */
498 void
499 xmm_object_release(memory_object)
500 ipc_port_t memory_object;
501 {
502 /*
503 * Use local or remote form as appropriate.
504 */
505 ip_lock(memory_object);
506 if (IP_NORMA_IS_PROXY(memory_object)) {
507 /* the following call inherits the lock */
508 xmm_object_release_remote(memory_object);
509 } else {
510 xmm_object_release_local(memory_object);
511 ip_unlock(memory_object);
512 }
513 }
514
515 /*
516 * Remote, protected cover routine for xmm_object_release.
517 * Requires host_priv.
518 */
519 kern_return_t
520 norma_xmm_object_release(host, memory_object)
521 host_t host;
522 ipc_port_t memory_object;
523 {
524 /*
525 * Check host port validity.
526 */
527 if (host == HOST_NULL) {
528 return KERN_INVALID_ARGUMENT;
529 }
530
531 /*
532 * Release xmm object.
533 */
534 xmm_object_release(memory_object);
535
536 /*
537 * Discard send right to memory_object given to us by our caller.
538 */
539 ipc_port_release_send(memory_object);
540 return KERN_SUCCESS;
541 }
542
543 /*
544 * Create an xmm object and a stack for an xmm-internal memory manager.
545 */
546 ipc_port_t
547 xmm_memory_manager_export(mobj)
548 xmm_obj_t mobj;
549 {
550 kern_return_t kr;
551 ipc_port_t xmm_object;
552 ipc_port_t memory_object;
553
554 /*
555 * Create a memory object port for the memory manager.
556 */
557 memory_object = ipc_port_alloc_kernel();
558 if (memory_object == IP_NULL) {
559 panic("xmm_memory_manager_export: memory_object");
560 return IP_NULL;
561 }
562
563 /*
564 * Create an svm stack on top of mobj.
565 */
566 kr = xmm_split_create(mobj, &mobj);
567 if (kr != KERN_SUCCESS) {
568 panic("xmm_memory_manager_export: xmm_split_create: %x\n", kr);
569 return IP_NULL;
570 }
571 kr = xmm_svm_create(mobj, memory_object, &mobj);
572 if (kr != KERN_SUCCESS) {
573 panic("xmm_memory_manager_export: xmm_svm_create: %x\n", kr);
574 return IP_NULL;
575 }
576
577 /*
578 * Create an xmm object and associate it with stack.
579 * It will have one send right and a no-senders notification request.
580 */
581 xmm_object = xmm_object_allocate(mobj);
582 if (xmm_object == IP_NULL) {
583 panic("xmm_memory_manager_export: xmm_object_allocate");
584 }
585
586 /*
587 * Associate the xmm object with a memory object,
588 * and return a send right for the memory object.
589 */
590 xmm_object_set(memory_object, xmm_object, FALSE);
591 return ipc_port_make_send(memory_object);
592 }
Cache object: a8269dd99c08d3187784259d3dd616be
|