1 /*
2 * Mach Operating System
3 * Copyright (c) 1991, 1992 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie the
24 * rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: xmm_server.c,v $
29 * Revision 2.6 92/03/10 16:29:35 jsb
30 * Merged in norma branch changes as of NORMA_MK7.
31 * [92/03/09 12:51:42 jsb]
32 *
33 * Revision 2.5.1.4 92/03/03 16:24:06 jeffreyh
34 * Pick up fix from dlb to add missing vm_object_dealocate to the
35 * object->internal case of k_server_set_ready().
36 * [92/02/29 jeffreyh]
37 *
38 * Revision 2.5.1.3 92/02/21 11:28:01 jsb
39 * Release send right to memory object in memory_object_terminate, now
40 * that the xmm_user layer keeps a separate send right.
41 * [92/02/20 14:02:57 jsb]
42 *
43 * Explicitly provide name parameter to xmm_decl macro.
44 * Changed termination for new reference counting implementation.
45 * [92/02/16 15:53:26 jsb]
46 *
47 * In memory_object_terminate, don't release_send memory_object_name
48 * if it is null. Do call xmm_object_by_memory_object_release.
49 * [92/02/11 18:23:15 jsb]
50 *
51 * Changed xmm_memory_object_init to use xmm_object_by_memory_object
52 * instead of xmm_lookup. Removed xmm_lookup.
53 * [92/02/10 17:02:39 jsb]
54 *
55 * Instead of holding a vm_object pointer, always do a vm_object_lookup
56 * on pager to obtain vm_object. This allows us to notice when vm_object.c
57 * has removed (as in vm_object_terminate) or changed (vm_object_collapse)
58 * the port to object associations. Removed now unneeded xmm_object_set.
59 * Declare second parameter of memory_object_* calls as xmm_obj_t, thanks
60 * to new pager_request_t declaration of object->pager_request.
61 * [92/02/10 09:47:16 jsb]
62 *
63 * Use new xmm_decl, and new memory_object_name and deallocation protocol.
64 * Removed svm exceptions; this is now handled by xmm_vm_object_lookup.
65 * Changed xmm_lookup to not use memory_object kobject to hold
66 * both mobj and vm_object; we now use memory_object->ip_norma_xmm_object
67 * which is migrated upon memory_object port migration.
68 * Don't defined memory_object_{init,create}; instead, vm/vm_object.c
69 * calls new routine xmm_memory_object_init routine which passes
70 * internal and size parameters down the xmm layers.
71 * [92/02/09 13:54:41 jsb]
72 *
73 * Revision 2.5.1.2 92/01/21 21:54:46 jsb
74 * De-linted. Supports new (dlb) memory object routines.
75 * Supports arbitrary reply ports to lock_request, etc.
76 * Converted mach_port_t (and port_t) to ipc_port_t.
77 * [92/01/20 17:28:58 jsb]
78 *
79 * Revision 2.5.1.1 92/01/03 17:13:19 jsb
80 * MACH_PORT_NULL -> IP_NULL.
81 *
82 * Revision 2.5 91/08/28 11:16:24 jsb
83 * Added temporary definition for memory_object_change_completed.
84 * [91/08/16 14:21:20 jsb]
85 *
86 * Added comment to xmm_lookup about read-only pagers.
87 * [91/08/15 10:12:12 jsb]
88 *
89 * Revision 2.4 91/08/03 18:19:40 jsb
90 * Changed mach_port_t to ipc_port_t whereever appropriate.
91 * [91/07/17 14:07:08 jsb]
92 *
93 * Revision 2.3 91/07/01 08:26:29 jsb
94 * Added support for memory_object_create.
95 * Now export normal memory_object_init with standard arguments.
96 * Improved object initialization logic.
97 * Added garbage collection.
98 * [91/06/29 15:39:01 jsb]
99 *
100 * Revision 2.2 91/06/17 15:48:33 jsb
101 * First checkin.
102 * [91/06/17 11:05:10 jsb]
103 *
104 */
105 /*
106 * File: norma/xmm_server.c
107 * Author: Joseph S. Barrera III
108 * Date: 1991
109 *
110 * Interface between kernel and xmm system.
111 */
112
113 #include <norma/xmm_obj.h>
114 #include <norma/xmm_server_rename.h>
115 #include <ipc/ipc_space.h>
116 #include <ipc/ipc_port.h>
117 #include <vm/memory_object.h>
118 #include <vm/vm_fault.h>
119 #include <vm/vm_map.h>
120 #include <vm/vm_object.h>
121 #include <vm/vm_page.h>
122 #include <vm/vm_pageout.h>
123 #include <kern/host.h>
124 #include <kern/ipc_kobject.h>
125
/*
 * Per-pager state for the server layer: the generic xmm_obj header
 * plus a send right naming the memory_object (pager) port.  The
 * vm_object itself is deliberately NOT cached here; each operation
 * re-does vm_object_lookup(pager) so that changes made by
 * vm_object_terminate/vm_object_collapse are observed (see the
 * comment in xmm_memory_object_init).
 */
126 struct mobj {
127 	struct xmm_obj obj;
128 	ipc_port_t pager;
129 };
130
/*
 * KOBJ casts the generic xmm_obj_t handle to this layer's private
 * mobj representation; each xmm layer redefines it for its own type.
 */
131 #undef KOBJ
132 #define KOBJ ((struct mobj *) kobj)
133 
/*
 * The m_server_* (top-side) entry points are never legitimately
 * invoked on this layer -- requests flow downward from the kernel's
 * memory_object_* stubs below -- so route them all to the m_invalid_*
 * handlers.
 */
134 #define m_server_init m_invalid_init
135 #define m_server_terminate m_invalid_terminate
136 #define m_server_copy m_invalid_copy
137 #define m_server_data_request m_invalid_data_request
138 #define m_server_data_unlock m_invalid_data_unlock
139 #define m_server_data_write m_invalid_data_write
140 #define m_server_lock_completed m_invalid_lock_completed
141 #define m_server_supply_completed m_invalid_supply_completed
142 #define m_server_data_return m_invalid_data_return
143 #define m_server_change_completed m_invalid_change_completed
144 
/* Declare the "server" xmm class with per-object state of struct mobj. */
145 xmm_decl(server, "server", sizeof(struct mobj));
146 
147 extern ipc_port_t xmm_object_by_memory_object();
148
/*
 * Bottom-side (k_*) hook: propagate a data_unavailable notification
 * from the xmm stack to the local VM system.  The vm_object is looked
 * up from the pager port on every call rather than cached (see struct
 * mobj comment).  Returns the kern_return_t from the VM layer.
 */
149 k_server_data_unavailable(kobj, offset, length)
150 	xmm_obj_t kobj;
151 	vm_offset_t offset;
152 	vm_size_t length;
153 {
154 #ifdef	lint
	/* Reference the downcall macro so lint sees all arguments used. */
155 	K_DATA_UNAVAILABLE(kobj, offset, length);
156 #endif	lint
157 	return memory_object_data_unavailable(vm_object_lookup(KOBJ->pager),
158 					      offset, length);
159 }
160
/*
 * Bottom-side hook: query the local vm_object's attributes on behalf
 * of the xmm stack.  object_ready, may_cache and copy_strategy are
 * out parameters filled in by the VM layer.
 */
161 k_server_get_attributes(kobj, object_ready, may_cache, copy_strategy)
162 	xmm_obj_t kobj;
163 	boolean_t *object_ready;
164 	boolean_t *may_cache;
165 	memory_object_copy_strategy_t *copy_strategy;
166 {
167 #ifdef	lint
168 	K_GET_ATTRIBUTES(kobj, object_ready, may_cache, copy_strategy);
169 #endif	lint
170 	return memory_object_get_attributes(vm_object_lookup(KOBJ->pager),
171 					    object_ready, may_cache,
172 					    copy_strategy);
173 }
174
/*
 * Bottom-side hook: forward a lock_request to the local VM system.
 * The xmm_reply_t is smuggled through the reply-port argument as an
 * ipc_port_t (declared send-once); memory_object_lock_completed below
 * casts it back.  Completion is reported asynchronously via that
 * reply.
 */
175 k_server_lock_request(kobj, offset, length, should_clean, should_flush,
176 		      lock_value, reply)
177 	xmm_obj_t kobj;
178 	vm_offset_t offset;
179 	vm_size_t length;
180 	boolean_t should_clean;
181 	boolean_t should_flush;
182 	vm_prot_t lock_value;
183 	xmm_reply_t reply;
184 {
185 #ifdef	lint
186 	K_LOCK_REQUEST(kobj, offset, length, should_clean, should_flush,
187 		       lock_value, reply);
188 #endif	lint
189 	return memory_object_lock_request(vm_object_lookup(KOBJ->pager),
190 					  offset, length, should_clean,
191 					  should_flush, lock_value,
192 					  (ipc_port_t) reply,
193 					  MACH_MSG_TYPE_PORT_SEND_ONCE);
194 }
195
/*
 * Bottom-side hook: report a paging error for [offset, offset+length)
 * to the local VM system with the given kern_return_t error_value.
 */
196 k_server_data_error(kobj, offset, length, error_value)
197 	xmm_obj_t kobj;
198 	vm_offset_t offset;
199 	vm_size_t length;
200 	kern_return_t error_value;
201 {
202 #ifdef	lint
203 	K_DATA_ERROR(kobj, offset, length, error_value);
204 #endif	lint
205 	return memory_object_data_error(vm_object_lookup(KOBJ->pager),
206 					offset, length, error_value);
207 }
208
/*
 * Bottom-side hook: the pager (via the xmm stack) declares the object
 * ready and supplies its attributes and name port.
 *
 * Responsibilities, in order:
 *   1. Record memory_object_name in the vm_object, keeping only one
 *      send right (release the duplicate on repeat calls).
 *   2. For internal objects, attributes are already set; just drop
 *      the vm_object reference taken by vm_object_lookup and return.
 *   3. Otherwise push the attributes into the VM layer and, if a
 *      reply was requested, deliver a change_completed upcall.
 *
 * NOTE(review): the internal path explicitly vm_object_deallocate()s
 * the lookup reference, while the common path does not -- presumably
 * memory_object_set_attributes_common consumes the reference passed
 * to it; confirm against vm/memory_object.c.
 */
209 k_server_set_ready(kobj, object_ready, may_cache, copy_strategy,
210 		   use_old_pageout, memory_object_name, reply)
211 	xmm_obj_t kobj;
212 	boolean_t object_ready;
213 	boolean_t may_cache;
214 	memory_object_copy_strategy_t copy_strategy;
215 	boolean_t use_old_pageout;
216 	ipc_port_t memory_object_name;
217 	xmm_reply_t reply;
218 {
219 	vm_object_t object;
220 	kern_return_t kr;
221 
222 #ifdef	lint
223 	K_SET_READY(kobj, object_ready, may_cache, copy_strategy,
224 		    use_old_pageout, memory_object_name, reply);
225 #endif	lint
226 
227 	/*
228 	 * Remember pager_name. Only keep one send right for it.
229 	 */
230 	object = vm_object_lookup(KOBJ->pager);
231 	vm_object_lock(object);
232 	if (object->pager_name == IP_NULL) {
233 		object->pager_name = memory_object_name;
234 	} else {
		/* Duplicate set_ready: drop the redundant send right. */
235 		assert(object->pager_name == memory_object_name);
236 		ipc_port_release_send(memory_object_name);
237 	}
238 
239 	/*
240 	 * If we are internal, we don't need to call set_attributes_common.
241 	 */
242 	if (object->internal) {
243 		assert(object->pager_ready);
244 		assert(reply == XMM_REPLY_NULL);
245 		vm_object_unlock(object);
246 		vm_object_deallocate(object);
247 		return KERN_SUCCESS;
248 	}
249 
250 	/*
251 	 * Call set_attributes_common.
252 	 */
253 	vm_object_unlock(object);
254 	kr = memory_object_set_attributes_common(object, object_ready,
255 						 may_cache, copy_strategy,
256 						 use_old_pageout);
257 
258 	/*
259 	 * Send a reply if one was requested.
260 	 */
261 	if (reply != XMM_REPLY_NULL) {
262 		M_CHANGE_COMPLETED(reply, may_cache, copy_strategy);
263 	}
264 	return kr;
265 }
266
/*
 * Bottom-side hook: the pager is destroying the object; pass the
 * destroy (with its reason code) to the local VM system.
 */
267 k_server_destroy(kobj, reason)
268 	xmm_obj_t kobj;
269 	kern_return_t reason;
270 {
271 #ifdef	lint
272 	K_DESTROY(kobj, reason);
273 #endif	lint
274 	return memory_object_destroy(vm_object_lookup(KOBJ->pager), reason);
275 }
276
/*
 * Bottom-side hook: supply page data to the local VM system.  "data"
 * is actually a vm_map_copy_t passed through a vm_offset_t, and the
 * xmm_reply_t again travels as a send-once reply port (see
 * k_server_lock_request); memory_object_supply_completed casts it
 * back when the supply completes.
 */
277 k_server_data_supply(kobj, offset, data, length, lock_value, precious, reply)
278 	xmm_obj_t kobj;
279 	vm_offset_t offset;
280 	vm_offset_t data;
281 	vm_size_t length;
282 	vm_prot_t lock_value;
283 	boolean_t precious;
284 	xmm_reply_t reply;
285 {
286 #ifdef	lint
287 	K_DATA_SUPPLY(kobj, offset, data, length, lock_value, precious, reply);
288 #endif	lint
289 	return memory_object_data_supply(vm_object_lookup(KOBJ->pager),
290 					 offset, (vm_map_copy_t) data,
291 					 length, lock_value, precious,
292 					 (ipc_port_t) reply,
293 					 MACH_MSG_TYPE_PORT_SEND_ONCE);
294 }
295
/*
 * Called from vm/vm_object.c when a vm_object gains a pager: build
 * (or find) the xmm stack for object->pager, push a "server" layer on
 * top, wire it to the vm_object via object->pager_request, and send
 * the initial M_INIT down the stack with this kernel's page size,
 * internal flag, and object size.
 *
 * Returns KERN_SUCCESS, or the error from xmm_obj_allocate.  (The
 * xmm_import_create failure path panics first, so its "return kr" is
 * effectively unreachable.)
 */
296 xmm_memory_object_init(object)
297 	vm_object_t object;
298 {
299 	ipc_port_t xmm_object;
300 	xmm_obj_t mobj;
301 	kern_return_t kr;
302 
303 	/*
304 	 * Find or create xmm_object corresponding to memory_object.
305 	 * Once created, the xmm_object for a memory_object remains
306 	 * the same until the memory_object port dies.
307 	 *
308 	 * XXX
309 	 * This isn't right -- what about memory_object_destroy()?
310 	 *
311 	 * Maybe at that point the xmm_object port is destroyed.
312 	 *
313 	 * XXX
314 	 * Check for multiple inits? Or is this handled well enough
315 	 * by vm_object_enter. A few asserts might be worthwhile...
316 	 */
317 	xmm_object = xmm_object_by_memory_object(object->pager);
318 	assert(xmm_object != IP_NULL);	/* XXX */
319 
320 	/*
321 	 * If xmm_object is local, then so is the svm stack, which will
322 	 * be stored as xmm_object's kobject. Otherwise, we need to
323 	 * create an import mobj.
324 	 */
325 	if (IP_NORMA_IS_PROXY(xmm_object)) {
326 		kr = xmm_import_create(xmm_object, &mobj);
327 		if (kr != KERN_SUCCESS) {
328 			panic("xmm_memory_object_init: xmm_import_create");
329 			return kr;
330 		}
331 	} else {
		/* Local: the svm stack hangs off the port's kobject. */
332 		assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
333 		mobj = (xmm_obj_t) xmm_object->ip_kobject;
		/* Drop the send right returned by the lookup above. */
334 		ipc_port_release_send(xmm_object);
335 	}
336 
337 	/*
338 	 * Create a server layer on top.
339 	 */
340 	kr = xmm_obj_allocate(&server_class, mobj, &mobj);
341 	if (kr != KERN_SUCCESS) {
342 		printf("xmm_memory_object_init: xmm_obj_allocate: %x\n", kr);
343 		return kr;
344 	}
345 
346 	/*
347 	 * Associate server mobj with vm object, and pager with mobj.
348 	 *
349 	 * The reason that we don't store the vm object directly in the
350 	 * mobj is that the vm object can change, for example as a result
351 	 * of vm_object_collapse.
352 	 */
353 	MOBJ->pager = object->pager;
354 	object->pager_request = mobj;
355 
356 	/*
357 	 * Intialize the mobj for this kernel.
358 	 */
359 	M_INIT(mobj, mobj, PAGE_SIZE, (boolean_t) object->internal,
360 	       object->size);
361 	return KERN_SUCCESS;
362 }
363
/*
 * Kernel upcall: the VM system is done with this memory object.
 * Release the send rights we were handed (either port may be null --
 * the xmm_user layer keeps its own send right to the memory object,
 * per the 2.5.1.3 history note), drop our reference on the server
 * mobj, and propagate the terminate down the stack.
 */
364 memory_object_terminate(memory_object, mobj, memory_object_name)
365 	ipc_port_t memory_object;
366 	xmm_obj_t mobj;
367 	ipc_port_t memory_object_name;
368 {
369 	if (memory_object != IP_NULL) {
370 		ipc_port_release_send(memory_object);
371 	}
372 	if (memory_object_name != IP_NULL) {
373 		ipc_port_release_send(memory_object_name);
374 	}
375 	xmm_obj_release(mobj);
376 	return M_TERMINATE(mobj, mobj);
377 }
378
/*
 * Layer teardown hook for the server mobj.  The release of the pager
 * send right is deliberately compiled out (the "#if 666" selects the
 * empty branch), so this currently leaks MOBJ->pager; re-enabling it
 * requires coordinating the right's ownership with xmm_user.c, per
 * the XXX below.
 */
379 void
380 m_server_deallocate(mobj)
381 	xmm_obj_t mobj;
382 {
383 #if	666
384 	/*
385 	 * XXX should release pager --
386 	 * but need to coordinate with xmm_user.c
387 	 */
388 #else
389 	ipc_port_release_send(MOBJ->pager);
390 #endif
391 }
392
393 /* ARGSUSED */
/*
 * Kernel upcall: memory_object_copy is not supported by the xmm
 * server layer; reaching it indicates a protocol violation, so panic.
 * The return exists only to satisfy the interface.
 */
394 memory_object_copy(memory_object, mobj, offset, length, new_memory_object)
395 	ipc_port_t memory_object;
396 	xmm_obj_t mobj;
397 	vm_offset_t offset;
398 	vm_size_t length;
399 	ipc_port_t new_memory_object;
400 {
401 	panic("xmm_server: memory_object_copy\n");
402 	return KERN_FAILURE;
403 }
404
/*
 * Kernel upcall: page-in request from the VM system; forward straight
 * down the xmm stack.  memory_object is unused (mobj identifies the
 * stack) -- the lint increment just silences the unused-arg warning.
 */
405 memory_object_data_request(memory_object, mobj, offset, length, desired_access)
406 	ipc_port_t memory_object;
407 	xmm_obj_t mobj;
408 	vm_offset_t offset;
409 	vm_size_t length;
410 	vm_prot_t desired_access;
411 {
412 #ifdef	lint
413 	memory_object++;
414 #endif	lint
415 	return M_DATA_REQUEST(mobj, mobj, offset, length, desired_access);
416 }
417
/*
 * Kernel upcall: request to unlock pages for the desired access;
 * forwarded unchanged down the xmm stack (memory_object unused).
 */
418 memory_object_data_unlock(memory_object, mobj, offset, length, desired_access)
419 	ipc_port_t memory_object;
420 	xmm_obj_t mobj;
421 	vm_offset_t offset;
422 	vm_size_t length;
423 	vm_prot_t desired_access;
424 {
425 #ifdef	lint
426 	memory_object++;
427 #endif	lint
428 	return M_DATA_UNLOCK(mobj, mobj, offset, length, desired_access);
429 }
430
/*
 * Kernel upcall: page-out (data write) from the VM system; forwarded
 * unchanged down the xmm stack (memory_object unused).
 */
431 memory_object_data_write(memory_object, mobj, offset, data, length)
432 	ipc_port_t memory_object;
433 	xmm_obj_t mobj;
434 	vm_offset_t offset;
435 	vm_offset_t data;
436 	vm_size_t length;
437 {
438 #ifdef	lint
439 	memory_object++;
440 #endif	lint
441 	return M_DATA_WRITE(mobj, mobj, offset, data, length);
442 }
443
444 #ifdef	KERNEL
445 /* ARGSUSED */
/*
 * Kernel upcall: not yet implemented in the xmm stack -- there is no
 * M_DATA_INITIALIZE downcall to forward to, so panic if reached.  See
 * the comment below for the intended design.
 */
446 memory_object_data_initialize(memory_object, mobj, offset, data, length)
447 	ipc_port_t memory_object;
448 	xmm_obj_t mobj;
449 	vm_offset_t offset;
450 	vm_offset_t data;
451 	vm_size_t length;
452 {
453 	/*
454 	 * Probably need to add M_DATA_INITIALIZE, or perhaps
455 	 * an 'initial' parameter to memory_object_data_write.
456 	 */
457 	panic("memory_object_data_initialize");
458 }
459 #endif	KERNEL
460
/*
 * Kernel upcall: completion of a lock_request.  reply_to is really
 * the xmm_reply_t that k_server_lock_request passed to the VM layer
 * disguised as a send-once port; recover it and deliver the
 * lock_completed upcall to the layer that asked.
 */
461 memory_object_lock_completed(reply_to, reply_to_type, mobj, offset, length)
462 	ipc_port_t reply_to;
463 	mach_msg_type_name_t reply_to_type;
464 	xmm_obj_t mobj;
465 	vm_offset_t offset;
466 	vm_size_t length;
467 {
468 	xmm_reply_t reply = (xmm_reply_t) reply_to;
469 
470 #ifdef	lint
471 	mobj++;
472 #endif
473 	assert(reply != XMM_REPLY_NULL);
474 	assert(reply_to_type == MACH_MSG_TYPE_PORT_SEND_ONCE);
475 	return M_LOCK_COMPLETED(reply, offset, length);
476 }
477
/*
 * Kernel upcall: completion of a data_supply.  As with lock_completed,
 * reply_to carries the xmm_reply_t from k_server_data_supply; recover
 * it and deliver supply_completed, including the result code and the
 * offset at which any error occurred.
 */
478 memory_object_supply_completed(reply_to, reply_to_type, mobj, offset, length,
479 			       result, error_offset)
480 	ipc_port_t reply_to;
481 	mach_msg_type_name_t reply_to_type;
482 	xmm_obj_t mobj;
483 	vm_offset_t offset;
484 	vm_size_t length;
485 	kern_return_t result;
486 	vm_offset_t error_offset;
487 {
488 	xmm_reply_t reply = (xmm_reply_t) reply_to;
489 
490 #ifdef	lint
491 	mobj++;
492 #endif
493 	assert(reply != XMM_REPLY_NULL);
494 	assert(reply_to_type == MACH_MSG_TYPE_PORT_SEND_ONCE);
495 	return M_SUPPLY_COMPLETED(reply, offset, length, result, error_offset);
496 }
497
/*
 * Kernel upcall: return of (possibly precious) data from the VM
 * system; forwarded unchanged down the xmm stack (memory_object
 * unused).
 */
498 memory_object_data_return(memory_object, mobj, offset, data, length)
499 	ipc_port_t memory_object;
500 	xmm_obj_t mobj;
501 	vm_offset_t offset;
502 	vm_offset_t data;
503 	vm_size_t length;
504 {
505 #ifdef	lint
506 	memory_object++;
507 #endif	lint
508 	return M_DATA_RETURN(mobj, mobj, offset, data, length);
509 }
Cache object: 19eb2f2c165b2849340a30dda69455b5
|