FreeBSD/Linux Kernel Cross Reference
sys/norma/xmm_copy.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie the
24 * rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: xmm_copy.c,v $
29 * Revision 2.4 92/03/10 16:28:57 jsb
30 * Merged in norma branch changes as of NORMA_MK7.
31 * [92/03/09 12:51:05 jsb]
32 *
33 * Revision 2.3.3.3 92/02/21 11:25:37 jsb
34 * Deallocate memory_object and memory_object_name ports.
35 * [92/02/20 10:56:16 jsb]
36 *
37 * Use newer xmm_decl macro with explicit name parameter.
38 * [92/02/16 14:06:22 jsb]
39 *
40 * Eliminated shadow obj creation, since we now let the vm system
41 * handle copies for us.
42 * [92/02/11 11:07:52 jsb]
43 *
44 * Separate xmm_copy_create from norma_copy_create.
45 * Replaced *_internal_memory_object garbage with call to
46 * xmm_memory_manager_export. Use new MEMORY_OBJECT_COPY_TEMPORARY
47 * strategy instead of MEMORY_OBJECT_COPY_DELAY, since we don't
48 * need (or want) to see changes made to our object.
49 * [92/02/11 11:05:04 jsb]
50 *
51 * Use new xmm_decl, and new memory_object_name and deallocation protocol.
52 * Removed *_internal_memory_object routines. Removed bogus data_write
53 * implementation. Added shadow layer creation.
54 * [92/02/09 12:49:43 jsb]
55 *
56 * Obtain reference to map upon creation; release on termination.
57 * [92/01/22 10:35:58 jsb]
58 *
59 * Revision 2.3.3.1 92/01/21 21:53:50 jsb
60 * De-linted. Supports new (dlb) memory object routines.
61 * Supports arbitrary reply ports to lock_request, etc.
62 * Converted mach_port_t (and port_t) to ipc_port_t.
63 * [92/01/20 17:19:14 jsb]
64 *
65 * Fixes from OSF.
66 * [92/01/17 14:13:56 jsb]
67 *
68 * Revision 2.3.1.1 92/01/15 12:13:44 jeffreyh
69 * Deallocate memory object name port on termination. (dlb)
70 *
71 * Revision 2.3 91/11/15 14:09:53 rpd
72 * Use ipc_port_make_send in norma_copy_create for returned memory_object.
73 * [91/09/23 09:09:25 jsb]
74 *
75 * Revision 2.2 91/08/28 11:16:22 jsb
76 * In m_copy_data_request: removed dead code, and added missing
77 * is_continuation parameter to vm_map_copyin_page_list.
78 * [91/08/16 14:25:04 jsb]
79 *
80 * First checkin.
81 * [91/08/15 13:03:06 jsb]
82 *
83 */
84 /*
85 * File: norma/xmm_copy.c
86 * Author: Joseph S. Barrera III
87 * Date: 1991
88 */
89
90 #include <norma/xmm_obj.h>
91 #include <norma/xmm_user_rename.h>
92 #include <ipc/ipc_space.h>
93 #include <ipc/ipc_port.h>
94 #include <vm/memory_object.h>
95 #include <vm/vm_fault.h>
96 #include <vm/vm_map.h>
97 #include <vm/vm_object.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_pageout.h>
100
/*
 * Per-object state for the "copy" xmm layer: a read-only memory
 * manager backed by a region [start, start+size) of a source map.
 */
struct mobj {
	struct xmm_obj	obj;	/* common xmm header; must be first (KOBJ/MOBJ cast) */
	vm_map_t	map;	/* source map; referenced in xmm_copy_create */
	vm_offset_t	start;	/* page-aligned start of region within map */
	vm_size_t	size;	/* page-aligned size of region */
	ipc_port_t	memory_object_name;	/* kernel port used as the object's name */
	ipc_port_t	memory_object;	/* exported memory object port; IP_NULL
					 * until norma_copy_create sets it */
};
109
#undef	KOBJ
#define	KOBJ	((struct mobj *) kobj)	/* view kobj as this layer's state */

/*
 * Operations this read-only layer never expects to receive are routed
 * to the m_invalid_* / k_invalid_* stubs (which presumably panic or
 * report an error -- defined elsewhere; verify in xmm_obj).
 */
#define	m_copy_copy		m_invalid_copy
#define	m_copy_data_unlock	m_invalid_data_unlock
#define	m_copy_data_write	m_invalid_data_write
#define	m_copy_lock_completed	m_invalid_lock_completed
#define	m_copy_supply_completed	m_invalid_supply_completed
#define	m_copy_data_return	m_invalid_data_return
#define	m_copy_change_completed	m_invalid_change_completed

#define	k_copy_data_unavailable	k_invalid_data_unavailable
#define	k_copy_get_attributes	k_invalid_get_attributes
#define	k_copy_lock_request	k_invalid_lock_request
#define	k_copy_data_error	k_invalid_data_error
#define	k_copy_set_ready	k_invalid_set_ready
#define	k_copy_destroy		k_invalid_destroy
#define	k_copy_data_supply	k_invalid_data_supply

/* Declare the "copy" xmm class with per-object state of struct mobj. */
xmm_decl(copy, "copy", sizeof(struct mobj));
130
131 kern_return_t
132 xmm_copy_create(map, start, size, new_mobj)
133 vm_map_t map;
134 vm_offset_t start;
135 vm_size_t size;
136 xmm_obj_t *new_mobj;
137 {
138 xmm_obj_t mobj;
139 kern_return_t kr;
140
141 assert(page_aligned(start));
142 assert(page_aligned(size));
143 kr = xmm_obj_allocate(©_class, XMM_OBJ_NULL, &mobj);
144 if (kr != KERN_SUCCESS) {
145 return kr;
146 }
147 vm_map_reference(map);
148 MOBJ->map = map;
149 MOBJ->start = start;
150 MOBJ->size = size;
151 MOBJ->memory_object_name = ipc_port_alloc_kernel();
152 if (MOBJ->memory_object_name == IP_NULL) {
153 panic("add_internal_memory_object: ipc_port_alloc_kernel");
154 }
155 MOBJ->memory_object = IP_NULL;
156 *new_mobj = mobj;
157 return KERN_SUCCESS;
158 }
159
160 norma_copy_create(map, start, size, memory_object_p)
161 vm_map_t map;
162 vm_offset_t start;
163 vm_size_t size;
164 ipc_port_t *memory_object_p;
165 {
166 xmm_obj_t mobj;
167 kern_return_t kr;
168 ipc_port_t xmm_memory_manager_export();
169
170 /*
171 * Create a read-only, xmm-internal memory manager for map.
172 */
173 kr = xmm_copy_create(map, start, size, &mobj);
174 if (kr != KERN_SUCCESS) {
175 return kr;
176 }
177
178 /*
179 * Create an svm stack and an xmm object, and save memory object.
180 */
181 MOBJ->memory_object = xmm_memory_manager_export(mobj);
182
183 /*
184 * Return memory object.
185 */
186 *memory_object_p = MOBJ->memory_object;
187 return KERN_SUCCESS;
188 }
189
/*
 * m_copy_init: initialization handshake from the kernel-side layer.
 * Link ourselves to k_kobj and immediately declare the object ready:
 * not cacheable, using the old pageout protocol, with copy strategy
 * TEMPORARY (per the file history: we don't need -- or want -- to see
 * changes made to our object).  A send right on our name port
 * identifies the object.
 */
m_copy_init(mobj, k_kobj, pagesize, internal, size)
	xmm_obj_t mobj;
	xmm_obj_t k_kobj;
	vm_size_t pagesize;
	boolean_t internal;
	vm_size_t size;
{
	/* this layer is bottom-most: it acts as its own kobj */
	xmm_obj_t kobj = mobj;

#ifdef	lint
	M_INIT(mobj, k_kobj, pagesize, internal, size);
#endif	lint
	xmm_kobj_link(kobj, k_kobj);
	return K_SET_READY(kobj, OBJECT_READY_TRUE, MAY_CACHE_FALSE,
			   MEMORY_OBJECT_COPY_TEMPORARY, USE_OLD_PAGEOUT_TRUE,
			   ipc_port_make_send(MOBJ->memory_object_name),
			   XMM_REPLY_NULL);
}
208
/*
 * m_copy_terminate: kernel has finished with the object.  Nothing to
 * do here; the map reference and ports are released later, in
 * m_copy_deallocate (presumably when the last object reference is
 * dropped -- verify against xmm_obj reference counting).
 */
m_copy_terminate(mobj, kobj)
	xmm_obj_t mobj;
	xmm_obj_t kobj;
{
	return KERN_SUCCESS;
}
215
/*
 * m_copy_deallocate: final teardown of a copy object.  Releases the
 * map reference taken in xmm_copy_create and destroys the kernel
 * ports allocated for the object.
 */
void
m_copy_deallocate(mobj)
	xmm_obj_t mobj;
{
	vm_map_deallocate(MOBJ->map);
	ipc_port_dealloc_kernel(MOBJ->memory_object_name);
	/* memory_object is set only if norma_copy_create exported one */
	if (MOBJ->memory_object != IP_NULL) {
		ipc_port_dealloc_kernel(MOBJ->memory_object);
	}
}
226
227 m_copy_data_request(mobj, kobj, offset, length, desired_access)
228 xmm_obj_t mobj;
229 xmm_obj_t kobj;
230 vm_offset_t offset;
231 vm_size_t length;
232 vm_prot_t desired_access;
233 {
234 extern zone_t vm_map_copy_zone;
235 vm_map_copy_t copy;
236 kern_return_t kr;
237
238 #ifdef lint
239 M_DATA_REQUEST(mobj, kobj, offset, length, desired_access);
240 #endif lint
241 assert(page_aligned(offset));
242 assert(page_aligned(length));
243 assert(length == PAGE_SIZE);
244 assert(offset + length <= MOBJ->size);
245
246 kr = vm_map_copyin_page_list(MOBJ->map,
247 MOBJ->start + offset,
248 PAGE_SIZE,
249 FALSE,/* src_destroy */
250 TRUE,/* steal pages */
251 ©,
252 FALSE/* is continuation */);
253 if (kr) {
254 fret("xmm_copy_data_request 0x%x 0x%x 0x%x: %x\n",
255 MOBJ->start, offset, length, kr);
256 return K_DATA_ERROR(kobj, offset, length, kr);
257 }
258 /* XXX should only return appropriate access */
259 return K_DATA_SUPPLY(kobj, offset, (vm_offset_t) copy, length,
260 VM_PROT_NONE, FALSE, XMM_REPLY_NULL);
261 }
Cache object: 23457ca66284837d2f412fdc324d8090
|