/*
 * Mach Operating System
 * Copyright (c) 1992 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: xmm_shadow.c,v $
 * Revision 2.2  92/03/10  16:29:43  jsb
 * 	Merged in norma branch changes as of NORMA_MK7.
 * 	[92/03/09  12:51:50  jsb]
 *
 * Revision 2.1.3.3  92/02/21  11:28:09  jsb
 * 	Explicitly provide name parameter to xmm_decl macro.
 * 	[92/02/16  14:24:15  jsb]
 *
 * 	Disabled; use the vm system to manage copies.
 * 	[92/02/11  11:08:14  jsb]
 *
 * 	Removed *_internal_memory_object garbage.
 * 	[92/02/11  11:00:23  jsb]
 *
 * 	First real implementation. Moved internal_memory_object routines here.
 * 	[92/02/09  14:19:11  jsb]
 *
 */
/*
 * File:	norma/xmm_shadow.c
 * Author:	Joseph S. Barrera III
 * Date:	1992
 *
 * Xmm layer to handle writes to a read-only object.
 * Interim solution until we can do lazy copies of svm memory.
 */
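/*
 * Operation: each page written through this layer is buffered
 * (m_shadow_data_write stashes the incoming vm_map_copy_t in
 * MOBJ->page[] and marks MOBJ->present[]); a later page-in of a
 * buffered page is answered directly with K_DATA_SUPPLY, while
 * page-ins of pages that were never written fall through to the
 * underlying read-only object.  Everything else is handled by the
 * interpose defaults.
 */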

#if 0

#include <norma/xmm_obj.h>
#include <mach/vm_param.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>

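/*
 * Per-shadow-object state.  For an object of `size' bytes there is one
 * slot per page: present[i] is TRUE once page i has been captured by a
 * data_write, and page[i] then holds the buffered vm_map_copy_t for it.
 */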
struct mobj {
	struct xmm_obj	obj;
	ipc_port_t	memory_object;	/* must be second field */
	xmm_obj_t	next;		/* must be third field */
	vm_size_t	size;
	boolean_t	*present;
	vm_map_copy_t	*page;		/* XXX should create internal pager */
};

#undef  KOBJ
#define KOBJ	((struct mobj *) kobj)

kern_return_t	m_shadow_terminate();
kern_return_t	m_shadow_data_request();
kern_return_t	m_shadow_data_write();

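/*
 * Class vector for the shadow layer: only terminate, data_request and
 * data_write are overridden here; every other operation is handled by
 * the generic interpose routines, which pass it through to the
 * adjacent layer.
 */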
xmm_decl(shadow_class,
	/* m_init		*/	m_interpose_init,
	/* m_terminate		*/	m_shadow_terminate,
	/* m_copy		*/	m_interpose_copy,
	/* m_data_request	*/	m_shadow_data_request,
	/* m_data_unlock	*/	m_interpose_data_unlock,
	/* m_data_write		*/	m_shadow_data_write,
	/* m_lock_completed	*/	m_interpose_lock_completed,
	/* m_supply_completed	*/	m_interpose_supply_completed,
	/* m_data_return	*/	m_interpose_data_return,
	/* m_change_completed	*/	m_interpose_change_completed,

	/* k_data_unavailable	*/	k_interpose_data_unavailable,
	/* k_get_attributes	*/	k_interpose_get_attributes,
	/* k_lock_request	*/	k_interpose_lock_request,
	/* k_data_error		*/	k_interpose_data_error,
	/* k_set_ready		*/	k_interpose_set_ready,
	/* k_destroy		*/	k_interpose_destroy,
	/* k_data_supply	*/	k_interpose_data_supply,

	/* name			*/	"shadow",
	/* size			*/	sizeof(struct mobj)
);

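/*
 * Layer a shadow on top of old_mobj.  The caller supplies the object
 * size (must be page aligned) and gets back the new top-level mobj;
 * until a page is written, every request is still satisfied by
 * old_mobj.
 */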
kern_return_t
xmm_shadow_create(old_mobj, size, new_mobj)
	xmm_obj_t old_mobj;
	vm_size_t size;
	xmm_obj_t *new_mobj;
{
	xmm_obj_t mobj;
	kern_return_t kr;

	assert(page_aligned(size));
	kr = xmm_obj_allocate(&shadow_class, old_mobj, &mobj);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	MOBJ->size = size;
	MOBJ->present = (boolean_t *) kalloc(atop(size) * sizeof(boolean_t));
	bzero(MOBJ->present, atop(size) * sizeof(boolean_t));
	MOBJ->page = (vm_map_copy_t *)
	    kalloc(atop(MOBJ->size) * sizeof(vm_map_copy_t));
	bzero(MOBJ->page, atop(MOBJ->size) * sizeof(vm_map_copy_t));
	*new_mobj = mobj;
	return KERN_SUCCESS;
}

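/*
 * Termination: discard every buffered page, release the bookkeeping
 * arrays, drop the object from the internal memory object list, and
 * pass the terminate down to the underlying object.
 */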
kern_return_t
m_shadow_terminate(mobj, kobj, memory_object_name)
	xmm_obj_t mobj;
	xmm_obj_t kobj;
	ipc_port_t memory_object_name;
{
	unsigned long page;
	kern_return_t kr;

	for (page = 0; page < atop(MOBJ->size); page++) {
		if (MOBJ->page[page]) {
			vm_map_copy_discard(MOBJ->page[page]);
		}
	}
	kfree(MOBJ->present, atop(MOBJ->size) * sizeof(boolean_t));
	kfree(MOBJ->page, atop(MOBJ->size) * sizeof(vm_map_copy_t));
	remove_internal_memory_object(mobj);
	kr = M_TERMINATE(mobj, mobj, memory_object_name);
	xmm_obj_deallocate(mobj);
	return kr;
}

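/*
 * Page-in: if the requested page has never been written through this
 * layer, forward the request to the source object; otherwise hand the
 * kernel a copy of the buffered page via K_DATA_SUPPLY.
 */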
kern_return_t
m_shadow_data_request(mobj, kobj, offset, length, desired_access)
	xmm_obj_t mobj;
	xmm_obj_t kobj;
	vm_offset_t offset;
	vm_size_t length;
	vm_prot_t desired_access;
{
	extern zone_t vm_map_copy_zone;
	vm_map_copy_t copy;
	unsigned long page;

	assert(page_aligned(offset));
	assert(page_aligned(length));
	assert(length == PAGE_SIZE);
	assert(offset + length <= MOBJ->size);

	/*
	 * If the page is not shadowed, pass request down to source obj.
	 */
	page = atop(offset);
	if (! MOBJ->present[page]) {
		return M_DATA_REQUEST(mobj, kobj, offset, length,
				      desired_access);
	}

	/*
	 * Copy the shadowed page.
	 * MOBJ->page[page] is an object flavor copy object.
	 * We need to keep a copy here (for multiple pageins),
	 * and we need a page list copy object anyway.
	 */
	if (MOBJ->page[page] == VM_MAP_COPY_NULL) {
		panic("m_shadow_data_request: absent page");
	}
	copy = (vm_map_copy_t) xmm_buffer_copy(MOBJ->page[page]);

	/*
	 * Provide the page list copy object containing the page
	 * to the kernel.
	 */
	return K_DATA_SUPPLY(kobj, offset, (vm_offset_t) copy, length,
			     VM_PROT_NONE, FALSE, XMM_REPLY_NULL);
}

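/*
 * Page-out from the kernel: capture the written page, replacing (and
 * discarding) any copy previously buffered for the same page.
 */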
kern_return_t
m_shadow_data_write(mobj, kobj, offset, data, length)
	xmm_obj_t mobj;
	xmm_obj_t kobj;
	vm_offset_t offset;
	vm_offset_t data;
	vm_size_t length;
{
	unsigned long page;
	vm_map_copy_t copy = (vm_map_copy_t) data;

	assert(page_aligned(offset));
	assert(page_aligned(length));
	assert(length == PAGE_SIZE);
	assert(offset + length <= MOBJ->size);

	page = atop(offset);
	if (MOBJ->present[page]) {
		assert(MOBJ->page[page]);
		vm_map_copy_discard(MOBJ->page[page]);
	}
	MOBJ->present[page] = TRUE;
	MOBJ->page[page] = copy;
	return KERN_SUCCESS;
}

/* ---------------------------------------------------------------------- */

#if 666
/*
 * This should live somewhere else
 */
xmm_obj_t internal_mobj_list;

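/*
 * internal_mobj_list links every mobj that has been given a
 * kernel-allocated memory object port.  is_internal_memory_object()
 * searches the list for a port and returns the owning mobj.
 */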
boolean_t
is_internal_memory_object(memory_object, new_mobj)
	ipc_port_t memory_object;
	xmm_obj_t *new_mobj;
{
	xmm_obj_t mobj;

	for (mobj = internal_mobj_list; mobj; mobj = MOBJ->next) {
		if (MOBJ->memory_object == memory_object) {
			*new_mobj = mobj;
			return TRUE;
		}
	}
	return FALSE;
}

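/*
 * Allocate a kernel port to stand in as the memory object for mobj,
 * link mobj onto the internal list, and return a send right for the
 * new port to the caller.
 */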
add_internal_memory_object(mobj, memory_object_p)
	xmm_obj_t mobj;
	ipc_port_t *memory_object_p;
{
	MOBJ->memory_object = ipc_port_alloc_kernel();
	if (MOBJ->memory_object == IP_NULL) {
		panic("add_internal_memory_object: ipc_port_alloc_kernel");
	}
	MOBJ->memory_object = ipc_port_make_send(MOBJ->memory_object);

	MOBJ->next = internal_mobj_list;
	internal_mobj_list = mobj;

	*memory_object_p = MOBJ->memory_object;
}

#define MP	((struct mobj *) (*mp))

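/*
 * Unlink mobj from the internal list; asserts if it is not found.
 * The memory_object port itself is not deallocated here (see the
 * XXX below).
 */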
remove_internal_memory_object(mobj)
	xmm_obj_t mobj;
{
	xmm_obj_t *mp;

	for (mp = &internal_mobj_list; *mp; mp = &MP->next) {
		if (*mp == mobj) {
			/* XXX deallocate MOBJ->memory_object? */
			*mp = MOBJ->next;
			return;
		}
	}
	assert(0);
}
#endif	/* 666 */

#endif	/* 0 */