FreeBSD/Linux Kernel Cross Reference
sys/norma/xmm_user.c
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: xmm_user.c,v $
 * Revision 2.6 92/03/10 16:38:42 jsb
 * Merged in norma branch changes as of NORMA_MK7.
 * [92/03/09 12:53:39 jsb]
 *
 * Revision 2.4.3.4 92/03/03 16:24:12 jeffreyh
 * Pick up fix from jsb to call K_SET_READY with MAY_CACHE_FALSE. This
 * fixes a memory leak.
 * [92/02/26 12:25:25 jeffreyh]
 *
 * Revision 2.4.3.3 92/02/21 11:28:30 jsb
 * Copy send right to memory object, so that each kernel can release its
 * own send right to memory object.
 * [92/02/20 13:57:24 jsb]
 *
 * Reference mobj on port to mobj conversion; release when done.
 * [92/02/20 10:54:28 jsb]
 *
 * Removed initialized flag and corresponding code to detect multiple
 * initialization, since xmm_kobj_link now handles such detection.
 * [92/02/18 08:47:44 jsb]
 *
 * Changed reply->mobj to reply->kobj.
 * [92/02/16 18:22:26 jsb]
 *
 * Explicitly provide name parameter to xmm_decl macro.
 * Changes for reference counting termination protocol.
 * [92/02/16 15:54:47 jsb]
 *
 * Removed is_internal_memory_object_call; xmm internal objects now
 * create their own xmm stacks and objects and thus will never be
 * seen here. Use new MEMORY_OBJECT_COPY_TEMPORARY strategy instead
 * of MEMORY_OBJECT_COPY_NONE for setting internal objects ready.
 * Removed ipc_kobject_set of memory_object; this was a hack for when
 * xmm_server.c stored a pointer to the svm mobj stack in the
 * memory_object kobject. We now use a separate port (the xmm object
 * port) for this association, and break that association elsewhere.
 * [92/02/11 11:22:23 jsb]
 *
 * Remove vm_object_lookup_by_pager.
 * [92/02/10 09:41:36 jsb]
 *
 * Use new xmm_decl, and new memory_object_name and deallocation protocol.
 * Let mig do automatic conversion of memory_control port into user obj.
 * Cleaned up memory_object_create support.
 * [92/02/09 14:01:13 jsb]
 *
 * Revision 2.4.3.2 92/01/21 21:55:05 jsb
 * De-linted. Supports new (dlb) memory object routines.
 * Supports arbitrary reply ports to lock_request, etc.
 * Converted mach_port_t (and port_t) to ipc_port_t.
 * [92/01/20 17:48:19 jsb]
 *
 * Revision 2.4.3.1 92/01/03 16:39:03 jsb
 * Picked up temporary fix to m_user_terminate from dlb.
 * [91/12/24 14:31:09 jsb]
 *
 * Revision 2.4 91/08/28 11:16:30 jsb
 * Added definition for xxx_memory_object_lock_request, and temporary
 * stubs for data_supply, object_ready, and change_attributes.
 * [91/08/16 14:22:37 jsb]
 *
 * Added check for internal memory objects to xmm_user_create.
 * [91/08/15 10:14:19 jsb]
 *
 * Revision 2.3 91/07/01 08:26:46 jsb
 * Collect garbage. Support memory_object_create.
 * Disassociate kobj from memory_control before calling
 * memory_object_terminate to prevent upcalls on terminated kobj.
 * [91/06/29 15:51:50 jsb]
 *
 * Revision 2.2 91/06/17 15:48:48 jsb
 * Renamed xmm_vm_object_lookup.
 * [91/06/17 13:20:06 jsb]
 *
 * First checkin.
 * [91/06/17 11:02:47 jsb]
 *
 */
/*
 * File: norma/xmm_user.c
 * Author: Joseph S. Barrera III
 * Date: 1991
 *
 * Interface between memory managers and xmm system.
 */

#include <norma/xmm_obj.h>
#include <norma/xmm_user_rename.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 * Since we ALWAYS have an SVM module above us,
 * we NEVER have more than one memory_control per memory_object.
 * Thus we can combine mobj and kobj.
 */

struct mobj {
        struct xmm_obj obj;
        ipc_port_t memory_object;
        ipc_port_t memory_control;
        ipc_port_t memory_object_name;
};

#undef KOBJ
#define KOBJ ((struct mobj *) kobj)

#define m_user_deallocate m_interpose_deallocate

#define k_user_data_unavailable k_invalid_data_unavailable
#define k_user_get_attributes k_invalid_get_attributes
#define k_user_lock_request k_invalid_lock_request
#define k_user_data_error k_invalid_data_error
#define k_user_set_ready k_invalid_set_ready
#define k_user_destroy k_invalid_destroy
#define k_user_data_supply k_invalid_data_supply

xmm_decl(user, "user", sizeof(struct mobj));

/*
 * Translate from memory_control to kobj. Take a reference.
 */
xmm_obj_t
xmm_kobj_lookup(memory_control)
        ipc_port_t memory_control;
{
        register xmm_obj_t kobj;

        if (memory_control == IP_NULL) {
                return XMM_OBJ_NULL;
        }
        ip_lock(memory_control);
        if (ip_kotype(memory_control) == IKOT_PAGING_REQUEST) {
                kobj = (xmm_obj_t) memory_control->ip_kobject;
                xmm_obj_reference(kobj);
        } else {
                kobj = XMM_OBJ_NULL;
        }
        ip_unlock(memory_control);
        return kobj;
}

/*
 * We create our own memory_control and memory_object_name ports.
 * This is easier and less confusing than each kernel allocating
 * its own ports, particularly for name ports, since everyone should
 * see the same name port for the same object.
 */
kern_return_t
xmm_user_create(memory_object, new_mobj)
        ipc_port_t memory_object;
        xmm_obj_t *new_mobj;
{
        ipc_port_t memory_control;
        ipc_port_t memory_object_name;
        kern_return_t kr;
        xmm_obj_t mobj;

        /*
         * Allocate request port.
         */
        memory_control = ipc_port_alloc_kernel();
        if (memory_control == IP_NULL) {
                panic("xmm_user_create: memory_control");
        }

        /*
         * Allocate name port.
         */
        memory_object_name = ipc_port_alloc_kernel();
        if (memory_object_name == IP_NULL) {
                panic("xmm_user_create: memory_object_name");
        }

        /*
         * Allocate mobj.
         */
        kr = xmm_obj_allocate(&user_class, XMM_OBJ_NULL, &mobj);
        if (kr != KERN_SUCCESS) {
                return kr;
        }

        MOBJ->memory_object = ipc_port_copy_send(memory_object);
        MOBJ->memory_control = memory_control;
        MOBJ->memory_object_name = memory_object_name;

        /*
         * Grab a reference for mobj and associate it with memory_control port.
         */
        xmm_obj_reference(mobj);
        ipc_kobject_set(memory_control, (ipc_kobject_t) mobj,
                        IKOT_PAGING_REQUEST);
        *new_mobj = mobj;
        return KERN_SUCCESS;
}

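/*
 * Initialize the user object: link it into the xmm stack, then contact
 * the memory manager. For internal (default-pager-backed) objects we
 * issue memory_object_create to the default pager and mark the object
 * ready ourselves, since the default pager won't; for external managers
 * we issue memory_object_init and wait for the manager's own
 * ready/set_attributes call.
 */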
m_user_init(mobj, k_kobj, pagesize, internal, size)
        xmm_obj_t mobj;
        xmm_obj_t k_kobj;
        vm_size_t pagesize;
        boolean_t internal;
        vm_size_t size;
{
        xmm_obj_t kobj = mobj;

#ifdef lint
        M_INIT(mobj, k_kobj, pagesize, internal, size);
#endif lint
        xmm_kobj_link(kobj, k_kobj);

        assert(MOBJ->memory_object != IP_NULL);
        if (internal) {
                /* acquire a naked send right for the default pager */
                ipc_port_t default_pager = memory_manager_default_reference();

                /* consumes the naked send right for default_pager */
                (void) k_memory_object_create(default_pager,
                                MOBJ->memory_object, size,
                                MOBJ->memory_control,
                                MOBJ->memory_object_name,
                                PAGE_SIZE);

                /* call set_ready, since default pager won't */
                return K_SET_READY(mobj, OBJECT_READY_TRUE, MAY_CACHE_FALSE,
                                MEMORY_OBJECT_COPY_TEMPORARY,
                                USE_OLD_PAGEOUT_TRUE,
                                ipc_port_make_send(MOBJ->memory_object_name),
                                XMM_REPLY_NULL);
        } else {
                (void) memory_object_init(MOBJ->memory_object,
                                MOBJ->memory_control,
                                MOBJ->memory_object_name,
                                PAGE_SIZE);
        }
        return KERN_SUCCESS;
}

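/*
 * Terminate the object: break the memory_control -> mobj association
 * first so that no further upcalls arrive on the terminated object,
 * then notify the memory manager and drop the reference taken at
 * creation time.
 */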
m_user_terminate(mobj, kobj)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
{
        kern_return_t kr;

#ifdef lint
        M_TERMINATE(mobj, kobj);
#endif lint
        ipc_kobject_set(MOBJ->memory_control, IKO_NULL, IKOT_NONE);
        kr = memory_object_terminate(MOBJ->memory_object,
                        MOBJ->memory_control,
                        MOBJ->memory_object_name);
        xmm_obj_release(mobj);
        return kr;
}

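/*
 * Object copy requests are not expected at this layer; treat one as
 * a fatal error.
 */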
m_user_copy(mobj, kobj, offset, length, new_mobj)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        xmm_obj_t new_mobj;
{
#ifdef lint
        M_COPY(mobj, kobj, offset, length, new_mobj);
#endif lint
        panic("m_user_copy");
        /* NOTREACHED */
}

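/*
 * The following m_user_* routines forward kernel-initiated requests
 * (page-in, unlock, page-out) to the external memory manager, naming
 * our single memory_control port as the request port.
 */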
m_user_data_request(mobj, kobj, offset, length, desired_access)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
#ifdef lint
        M_DATA_REQUEST(mobj, kobj, offset, length, desired_access);
#endif lint
        return memory_object_data_request(MOBJ->memory_object,
                        KOBJ->memory_control,
                        offset,
                        length,
                        desired_access);
}

m_user_data_unlock(mobj, kobj, offset, length, desired_access)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
#ifdef lint
        M_DATA_UNLOCK(mobj, kobj, offset, length, desired_access);
#endif lint
        return memory_object_data_unlock(MOBJ->memory_object,
                        KOBJ->memory_control,
                        offset,
                        length,
                        desired_access);
}

m_user_data_write(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
#ifdef lint
        M_DATA_WRITE(mobj, kobj, offset, data, length);
#endif lint
        return memory_object_data_write(MOBJ->memory_object,
                        KOBJ->memory_control,
                        offset,
                        data,
                        length);
}

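/*
 * Completion upcalls. Each carries an xmm_reply describing where the
 * original request came from; forward the completion to that reply
 * port and then deallocate the reply.
 */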
m_user_lock_completed(reply, offset, length)
        xmm_reply_t reply;
        vm_offset_t offset;
        vm_size_t length;
{
        xmm_obj_t kobj;
        kern_return_t kr;

#ifdef lint
        M_LOCK_COMPLETED(reply, offset, length);
#endif lint
        kobj = reply->kobj;
        assert(kobj->class == &user_class);
        kr = memory_object_lock_completed(reply->reply_to,
                        reply->reply_to_type,
                        KOBJ->memory_control,
                        offset,
                        length);
        xmm_reply_deallocate(reply);
        return kr;
}

m_user_supply_completed(reply, offset, length, result, error_offset)
        xmm_reply_t reply;
        vm_offset_t offset;
        vm_size_t length;
        kern_return_t result;
        vm_offset_t error_offset;
{
        xmm_obj_t kobj;
        kern_return_t kr;

#ifdef lint
        M_SUPPLY_COMPLETED(reply, offset, length, result, error_offset);
#endif lint
        kobj = reply->kobj;
        assert(kobj->class == &user_class);
        kr = memory_object_supply_completed(reply->reply_to,
                        reply->reply_to_type,
                        KOBJ->memory_control,
                        offset,
                        length,
                        result,
                        error_offset);
        xmm_reply_deallocate(reply);
        return kr;
}

m_user_data_return(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
#ifdef lint
        M_DATA_RETURN(mobj, kobj, offset, data, length);
#endif lint
        return memory_object_data_return(MOBJ->memory_object,
                        KOBJ->memory_control,
                        offset,
                        data,
                        length);
}

m_user_change_completed(reply, may_cache, copy_strategy)
        xmm_reply_t reply;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
{
        kern_return_t kr;

#ifdef lint
        M_CHANGE_COMPLETED(reply, may_cache, copy_strategy);
#endif lint
        kr = memory_object_change_completed(reply->reply_to,
                        reply->reply_to_type,
                        may_cache,
                        copy_strategy);
        xmm_reply_deallocate(reply);
        return kr;
}

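/*
 * The routines below are the memory-manager-side entry points (the
 * memory_object_* interface). MiG converts the incoming memory_control
 * port into a referenced kobj (see xmm_kobj_lookup above); each routine
 * passes the request down the xmm stack through the corresponding K_*
 * macro and releases that reference before returning.
 */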
kern_return_t
memory_object_data_provided(kobj, offset, data, length, lock_value)
        xmm_obj_t kobj;
        vm_offset_t offset;
        pointer_t data;
        unsigned int length;
        vm_prot_t lock_value;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_DATA_SUPPLY(kobj, offset, data, length, lock_value,
                        PRECIOUS_FALSE, XMM_REPLY_NULL);
        xmm_obj_release(kobj);
        return kr;
}

kern_return_t
memory_object_data_unavailable(kobj, offset, length)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_DATA_UNAVAILABLE(kobj, offset, length);
        xmm_obj_release(kobj);
        return kr;
}

kern_return_t
memory_object_get_attributes(kobj, object_ready, may_cache, copy_strategy)
        xmm_obj_t kobj;
        boolean_t *object_ready;
        boolean_t *may_cache;
        memory_object_copy_strategy_t *copy_strategy;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_GET_ATTRIBUTES(kobj, object_ready, may_cache, copy_strategy);
        xmm_obj_release(kobj);
        return kr;
}

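/*
 * Lock requests may name an arbitrary reply port, so wrap the reply
 * in an xmm_reply before passing the request down the stack.
 */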
kern_return_t
memory_object_lock_request(kobj, offset, length, should_return, should_flush,
                           prot, reply_to, reply_to_type)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        int should_return;
        boolean_t should_flush;
        vm_prot_t prot;
        ipc_port_t reply_to;
        mach_msg_type_name_t reply_to_type;
{
        xmm_reply_t reply;
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = xmm_reply_allocate(kobj, reply_to, reply_to_type, &reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        kr = K_LOCK_REQUEST(kobj, offset, length, should_return, should_flush,
                        prot, reply);
        xmm_obj_release(kobj);
        return kr;
}

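/*
 * Old-form lock request: map the boolean should_clean argument onto
 * the newer should_return values and call the current interface.
 */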
kern_return_t
xxx_memory_object_lock_request(kobj, offset, size, should_clean, should_flush,
                               prot, reply_to, reply_to_type)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t size;
        boolean_t should_clean;
        boolean_t should_flush;
        vm_prot_t prot;
        ipc_port_t reply_to;
        mach_msg_type_name_t reply_to_type;
{
        int should_return;

        if (should_clean) {
                should_return = MEMORY_OBJECT_RETURN_DIRTY;
        } else {
                should_return = MEMORY_OBJECT_RETURN_NONE;
        }
        return memory_object_lock_request(kobj, offset, size, should_return,
                        should_flush, prot, reply_to,
                        reply_to_type);
}

kern_return_t
memory_object_data_error(kobj, offset, length, error_value)
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        kern_return_t error_value;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_DATA_ERROR(kobj, offset, length, error_value);
        xmm_obj_release(kobj);
        return kr;
}

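/*
 * Old-form attribute call: expressed in terms of K_SET_READY, keeping
 * the old pageout protocol (USE_OLD_PAGEOUT_TRUE).
 */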
kern_return_t
memory_object_set_attributes(kobj, object_ready, may_cache, copy_strategy)
        xmm_obj_t kobj;
        boolean_t object_ready;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_SET_READY(kobj, object_ready, may_cache, copy_strategy,
                        USE_OLD_PAGEOUT_TRUE,
                        ipc_port_make_send(KOBJ->memory_object_name),
                        XMM_REPLY_NULL);
        xmm_obj_release(kobj);
        return kr;
}

kern_return_t
memory_object_destroy(kobj, reason)
        xmm_obj_t kobj;
        kern_return_t reason;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_DESTROY(kobj, reason);
        xmm_obj_release(kobj);
        return kr;
}

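/*
 * New-form data supply, with precious page support and an optional
 * reply port for supply_completed.
 */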
kern_return_t
memory_object_data_supply(kobj, offset, data, length, lock_value, precious,
                          reply_to, reply_to_type)
        xmm_obj_t kobj;
        vm_offset_t offset;
        pointer_t data;
        unsigned int length;
        vm_prot_t lock_value;
        boolean_t precious;
        ipc_port_t reply_to;
        mach_msg_type_name_t reply_to_type;
{
        xmm_reply_t reply;
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = xmm_reply_allocate(kobj, reply_to, reply_to_type, &reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        kr = K_DATA_SUPPLY(kobj, offset, data, length, lock_value, precious,
                        reply);
        xmm_obj_release(kobj);
        return kr;
}

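/*
 * New-form replacement for memory_object_set_attributes: marks the
 * object ready, without the old pageout protocol (USE_OLD_PAGEOUT_FALSE).
 */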
kern_return_t
memory_object_ready(kobj, may_cache, copy_strategy)
        xmm_obj_t kobj;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
{
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = K_SET_READY(kobj, OBJECT_READY_TRUE, may_cache, copy_strategy,
                        USE_OLD_PAGEOUT_FALSE,
                        ipc_port_make_send(KOBJ->memory_object_name),
                        XMM_REPLY_NULL);
        xmm_obj_release(kobj);
        return kr;
}

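/*
 * Like memory_object_ready, but with a reply port for
 * memory_object_change_completed.
 */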
kern_return_t
memory_object_change_attributes(kobj, may_cache, copy_strategy, reply_to,
                                reply_to_type)
        xmm_obj_t kobj;
        boolean_t may_cache;
        memory_object_copy_strategy_t copy_strategy;
        ipc_port_t reply_to;
        mach_msg_type_name_t reply_to_type;
{
        xmm_reply_t reply;
        kern_return_t kr;

        if (kobj == XMM_OBJ_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        kr = xmm_reply_allocate(kobj, reply_to, reply_to_type, &reply);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        kr = K_SET_READY(kobj, OBJECT_READY_TRUE, may_cache, copy_strategy,
                        USE_OLD_PAGEOUT_FALSE,
                        ipc_port_make_send(KOBJ->memory_object_name), reply);
        xmm_obj_release(kobj);
        return kr;
}