/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: xmm_export.c,v $
 * Revision 2.5 92/03/10 16:29:05 jsb
 * Merged in norma branch changes as of NORMA_MK7.
 * [92/03/09 12:51:14 jsb]
 *
 * Revision 2.4.2.3 92/02/21 11:25:50 jsb
 * In _proxy_terminate, deallocate xmm_pager and release xmm_kernel.
 * [92/02/20 15:46:35 jsb]
 *
 * Reference mobj on port to mobj conversion; release when done.
 * [92/02/20 10:54:05 jsb]
 *
 * Changed MACH_PORT use to IP_NULL. Use m_interpose_deallocate.
 * [92/02/18 17:13:28 jsb]
 *
 * Changed reply->mobj to reply->kobj.
 * [92/02/16 18:22:12 jsb]
 *
 * Explicitly provide name parameter to xmm_decl macro.
 * Hide and release mobj in _proxy_terminate.
 * [92/02/16 15:51:50 jsb]
 *
 * Renamed xmm_export_notify to xmm_pager_notify.
 * [92/02/10 17:27:15 jsb]
 *
 * Changed proxy_init to use xmm object instead of
 * <guessed host, memory_object> pair.
 * Renamed mobj_port to xmm_pager, and xmm_control to xmm_kernel.
 * [92/02/10 17:01:03 jsb]
 *
 * Use new xmm_decl, and new memory_object_name and deallocation protocol.
 * [92/02/09 12:51:49 jsb]
 *
 * Revision 2.4.2.2 92/01/21 21:54:06 jsb
 * Added xmm_export_notify stub.
 * [92/01/21 18:22:48 jsb]
 *
 * Use ports instead of pointers when communicating with xmm_import.c.
 * De-linted. Supports new (dlb) memory object routines.
 * Supports arbitrary reply ports to lock_request, etc.
 * Converted mach_port_t (and port_t) to ipc_port_t.
 * [92/01/20 17:21:43 jsb]
 *
 * Fixes from OSF.
 * [92/01/17 14:14:46 jsb]
 *
 * Revision 2.4.2.1.1.1 92/01/15 12:15:33 jeffreyh
 * Deallocate memory object name port on termination. (dlb)
 *
 * Revision 2.4.2.1 92/01/03 16:38:45 jsb
 * Added missing type cast.
 * [91/12/27 21:29:32 jsb]
 *
 * Cleaned up debugging printf.
 * [91/12/24 14:30:28 jsb]
 *
 * Revision 2.4 91/11/15 14:10:03 rpd
 * Use ipc_port_copy_send in _proxy_init for import_master.
 * [91/09/23 09:14:28 jsb]
 *
 * Revision 2.3 91/07/01 08:26:07 jsb
 * Fixed object importation protocol.
 * Corrected declaration of _proxy_lock_completed.
 * [91/06/29 15:28:46 jsb]
 *
 * Revision 2.2 91/06/17 15:48:15 jsb
 * First checkin.
 * [91/06/17 11:06:11 jsb]
 *
 */
/*
 * File:	norma/xmm_export.c
 * Author:	Joseph S. Barrera III
 * Date:	1991
 *
 * Xmm layer for allowing remote kernels to map a local object.
 */

#include <norma/xmm_obj.h>
#include <kern/host.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <mach/notify.h>
#include <mach/proxy.h>

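/*
 * Per-object state for an exported memory object: xmm_pager is the
 * kernel port by which the remote kernel names this object, and
 * xmm_kernel is a send right used as the destination for proxy_*
 * messages to that kernel.
 */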
struct mobj {
        struct xmm_obj obj;
        ipc_port_t xmm_pager;
        ipc_port_t xmm_kernel;
};

#undef  KOBJ
#define KOBJ ((struct mobj *) kobj)

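/*
 * Requests from the remote kernel arrive as _proxy_* messages (below)
 * rather than through the local xmm stack, so the m_export_* entry
 * points are bound to the invalid handlers; deallocation is interposed.
 */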
#define m_export_init m_invalid_init
#define m_export_terminate m_invalid_terminate
#define m_export_deallocate m_interpose_deallocate
#define m_export_copy m_invalid_copy
#define m_export_data_request m_invalid_data_request
#define m_export_data_unlock m_invalid_data_unlock
#define m_export_data_write m_invalid_data_write
#define m_export_lock_completed m_invalid_lock_completed
#define m_export_supply_completed m_invalid_supply_completed
#define m_export_data_return m_invalid_data_return
#define m_export_change_completed m_invalid_change_completed

xmm_decl(export, "export", sizeof(struct mobj));

extern ipc_port_t xmm_object_by_memory_object();

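/*
 * The k_export_* routines forward kernel-directed operations from the
 * local xmm stack to the remote kernel as proxy_* messages sent to
 * xmm_kernel.  When the caller supplies an xmm reply, a proxy reply port
 * is allocated so that the completion can be routed back to this node.
 */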
k_export_data_unavailable(kobj, offset, length)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
{
#ifdef lint
        K_DATA_UNAVAILABLE(kobj, offset, length);
#endif /* lint */
        return proxy_data_unavailable(KOBJ->xmm_kernel, offset, length);
}

k_export_get_attributes(kobj, object_ready, may_cache, copy_strategy)
        xmm_obj_t kobj;
        boolean_t *object_ready;
        boolean_t *may_cache;
        memory_object_copy_strategy_t *copy_strategy;
{
#ifdef lint
        K_GET_ATTRIBUTES(kobj, object_ready, may_cache, copy_strategy);
#endif /* lint */
        return proxy_get_attributes(KOBJ->xmm_kernel, object_ready, may_cache,
                                    copy_strategy);
}

k_export_lock_request(kobj, offset, length, should_clean, should_flush,
                      lock_value, reply)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        boolean_t should_clean;
        boolean_t should_flush;
        vm_prot_t lock_value;
        xmm_reply_t reply;
{
        kern_return_t kr;

#ifdef lint
        K_LOCK_REQUEST(kobj, offset, length, should_clean, should_flush,
                       lock_value, reply);
#endif /* lint */
        if (reply == XMM_REPLY_NULL) {
                return proxy_lock_request(KOBJ->xmm_kernel, offset, length,
                                          should_clean, should_flush,
                                          lock_value, IP_NULL);
        }
        kr = xmm_reply_allocate_proxy(reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        assert(reply != XMM_REPLY_NULL && reply->kobj == kobj);
        return proxy_lock_request(KOBJ->xmm_kernel, offset, length,
                                  should_clean, should_flush, lock_value,
                                  reply->reply_proxy);
}

k_export_data_error(kobj, offset, length, error_value)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        kern_return_t error_value;
{
#ifdef lint
        K_DATA_ERROR(kobj, offset, length, error_value);
#endif /* lint */
        return proxy_data_error(KOBJ->xmm_kernel, offset, length, error_value);
}

k_export_set_ready(kobj, object_ready, may_cache, copy_strategy,
                   use_old_pageout, memory_object_name, reply)
        xmm_obj_t kobj;
        boolean_t object_ready;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
        boolean_t use_old_pageout;
        ipc_port_t memory_object_name;
        xmm_reply_t reply;
{
        kern_return_t kr;

#ifdef lint
        K_SET_READY(kobj, object_ready, may_cache, copy_strategy,
                    use_old_pageout, memory_object_name, reply);
#endif /* lint */
        if (reply == XMM_REPLY_NULL) {
                return proxy_set_ready(KOBJ->xmm_kernel, KOBJ->xmm_pager,
                                       object_ready, may_cache, copy_strategy,
                                       KERN_SUCCESS, use_old_pageout,
                                       memory_object_name, IP_NULL);
        }
        kr = xmm_reply_allocate_proxy(reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        assert(reply != XMM_REPLY_NULL && reply->kobj == kobj);
        return proxy_set_ready(KOBJ->xmm_kernel, KOBJ->xmm_pager,
                               object_ready, may_cache, copy_strategy,
                               KERN_SUCCESS, use_old_pageout,
                               memory_object_name, reply->reply_proxy);
}

k_export_destroy(kobj, reason)
        xmm_obj_t kobj;
        kern_return_t reason;
{
#ifdef lint
        K_DESTROY(kobj, reason);
#endif /* lint */
        return proxy_destroy(KOBJ->xmm_kernel, reason);
}

k_export_data_supply(kobj, offset, data, length, lock_value, precious, reply)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
        vm_prot_t lock_value;
        boolean_t precious;
        xmm_reply_t reply;
{
        kern_return_t kr;

#ifdef lint
        K_DATA_SUPPLY(kobj, offset, data, length, lock_value, precious, reply);
#endif /* lint */
        if (reply == XMM_REPLY_NULL) {
                return proxy_data_supply(KOBJ->xmm_kernel, offset, data, length,
                                         lock_value, precious, IP_NULL);
        }
        kr = xmm_reply_allocate_proxy(reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        assert(reply != XMM_REPLY_NULL && reply->kobj == kobj);
        return proxy_data_supply(KOBJ->xmm_kernel, offset, data, length,
                                 lock_value, precious, reply->reply_proxy);
}

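/*
 * Convert an xmm_pager kernel port into the mobj it names, taking a
 * reference on the mobj; the caller must release it with xmm_obj_release.
 * Returns XMM_OBJ_NULL if the port is not a valid, active IKOT_XMM_PAGER
 * port.
 */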
xmm_obj_t
convert_xmm_pager_to_mobj(xmm_pager)
        ipc_port_t xmm_pager;
{
        xmm_obj_t mobj = XMM_OBJ_NULL;

        if (IP_VALID(xmm_pager)) {
                ip_lock(xmm_pager);
                if (ip_active(xmm_pager) &&
                    ip_kotype(xmm_pager) == IKOT_XMM_PAGER) {
                        mobj = (xmm_obj_t) xmm_pager->ip_kobject;
                        xmm_obj_reference(mobj);
                }
                ip_unlock(xmm_pager);
        }
        return mobj;
}

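/*
 * Notification stub for xmm_pager ports; all notifications are
 * currently ignored.
 */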
boolean_t
xmm_pager_notify(msg)
        mach_msg_header_t *msg;
{
        return FALSE;
}

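/*
 * The _proxy_* routines below handle proxy messages from the remote
 * kernel and pass its memory_object requests down the local xmm stack
 * via the M_* calls.
 *
 * _proxy_init layers an export mobj on the xmm object named by
 * xmm_object, allocates the xmm_pager kernel port by which the remote
 * kernel will name it, and initializes the underlying object.
 */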
_proxy_init(xmm_object, xmm_kernel, pagesize, internal, size)
        ipc_port_t xmm_object;
        ipc_port_t xmm_kernel;
        vm_size_t pagesize;
        boolean_t internal;
        vm_size_t size;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        /*
         * XXX
         * Check for multiple inits and/or reuse of memory_object.
         * XXX
         * Should use proxy_set_ready to return errors.
         */
        ip_lock(xmm_object);
        if (ip_kotype(xmm_object) != IKOT_XMM_OBJECT) {
                ip_unlock(xmm_object);
                return KERN_INVALID_ARGUMENT;
        }
        mobj = (xmm_obj_t) xmm_object->ip_kobject;
        ip_unlock(xmm_object);

        kr = xmm_obj_allocate(&export_class, mobj, &mobj);
        if (kr != KERN_SUCCESS) {
                printf("_proxy_init: xmm_obj_allocate: %x\n", kr);
                return kr;
        }

        MOBJ->xmm_pager = ipc_port_alloc_kernel();
        if (MOBJ->xmm_pager == IP_NULL) {
                panic("_proxy_init: allocate xmm_pager");
        }
        ipc_kobject_set(MOBJ->xmm_pager, (ipc_kobject_t) mobj, IKOT_XMM_PAGER);
        MOBJ->xmm_kernel = xmm_kernel;

        xmm_obj_reference(mobj);
        kr = M_INIT(mobj, mobj, pagesize, internal, size);
        xmm_obj_release(mobj);
        return kr;
}

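/*
 * _proxy_terminate tears down the export layer: it disassociates and
 * deallocates the xmm_pager kernel port, releases the send right for
 * the remote kernel, and terminates the underlying object.
 */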
_proxy_terminate(xmm_pager)
        ipc_port_t xmm_pager;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        ipc_kobject_set(MOBJ->xmm_pager, IKO_NULL, IKOT_NONE);
        ipc_port_dealloc_kernel(MOBJ->xmm_pager);
        ipc_port_release_send(MOBJ->xmm_kernel);
        xmm_obj_release(mobj);
        kr = M_TERMINATE(mobj, mobj);
        xmm_obj_release(mobj);
        return kr;
}

_proxy_copy(xmm_pager, offset, length, new_memory_object)
        ipc_port_t xmm_pager;
        vm_offset_t offset;
        vm_size_t length;
        memory_object_t new_memory_object;
{
        xmm_obj_t mobj;

#ifdef lint
        offset++;
        length++;
        new_memory_object++;
#endif
        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        panic("_proxy_copy: not implemented\n");
        xmm_obj_release(mobj);
        return KERN_FAILURE;
}

_proxy_data_request(xmm_pager, offset, length, desired_access)
        ipc_port_t xmm_pager;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        kr = M_DATA_REQUEST(mobj, mobj, offset, length, desired_access);
        xmm_obj_release(mobj);
        return kr;
}

_proxy_data_unlock(xmm_pager, offset, length, desired_access)
        ipc_port_t xmm_pager;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        kr = M_DATA_UNLOCK(mobj, mobj, offset, length, desired_access);
        xmm_obj_release(mobj);
        return kr;
}

_proxy_data_write(xmm_pager, offset, data, length)
        ipc_port_t xmm_pager;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        kr = M_DATA_WRITE(mobj, mobj, offset, data, length);
        xmm_obj_release(mobj);
        return kr;
}

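/*
 * Lock, supply, and change completions from the remote kernel arrive on
 * the reply ports allocated by the corresponding k_export_* requests and
 * are dispatched through the associated xmm reply objects.
 */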
_proxy_lock_completed(reply_to, offset, length)
        ipc_port_t reply_to;
        vm_offset_t offset;
        vm_size_t length;
{
        xmm_reply_t reply;

        reply = convert_port_to_reply(reply_to);
        if (reply == XMM_REPLY_NULL) {
                return KERN_FAILURE;
        }
        return M_LOCK_COMPLETED(reply, offset, length);
}

_proxy_supply_completed(reply_to, offset, length, result, error_offset)
        ipc_port_t reply_to;
        vm_offset_t offset;
        vm_size_t length;
        kern_return_t result;
        vm_offset_t error_offset;
{
        xmm_reply_t reply;

        reply = convert_port_to_reply(reply_to);
        if (reply == XMM_REPLY_NULL) {
                return KERN_FAILURE;
        }
        return M_SUPPLY_COMPLETED(reply, offset, length, result, error_offset);
}

_proxy_data_return(xmm_pager, offset, data, length)
        ipc_port_t xmm_pager;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        mobj = convert_xmm_pager_to_mobj(xmm_pager);
        if (mobj == XMM_OBJ_NULL) {
                return KERN_FAILURE;
        }
        kr = M_DATA_RETURN(mobj, mobj, offset, data, length);
        xmm_obj_release(mobj);
        return kr;
}

_proxy_change_completed(reply_to, may_cache, copy_strategy)
        ipc_port_t reply_to;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
{
        xmm_reply_t reply;

        reply = convert_port_to_reply(reply_to);
        if (reply == XMM_REPLY_NULL) {
                return KERN_FAILURE;
        }
        return M_CHANGE_COMPLETED(reply, may_cache, copy_strategy);
}