FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_user.c
/*
 * Mach Operating System
 * Copyright (c) 1993,1992,1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: vm_user.c,v $
 * Revision 2.24  93/11/17  18:57:54  dbg
 *	Conditionalized projected buffer support under NET_ATM.
 *	Added ANSI function prototypes.
 *	[93/09/10  dbg]
 *
 * Revision 2.23  93/08/10  15:13:57  mrt
 *	Included calls to projected_buffer_in_range to deny user direct
 *	manipulation of protection, inheritance, machine attributes or
 *	wiring of projected buffers. These can be altered only by other
 *	code inside the kernel, presumably the device driver that created
 *	the projected buffer.
 *	[93/02/16  09:48:40  jcb]
 *
 * Revision 2.22  92/08/03  18:02:30  jfriedl
 *	removed silly prototypes
 *	[92/08/02  jfriedl]
 *
 * Revision 2.21  92/05/21  17:27:02  jfriedl
 *	tried prototypes.
 *	[92/05/20  jfriedl]
 *
 * Revision 2.20  92/03/10  16:30:43  jsb
 *	Removed NORMA_VM workaround.
 *	[92/02/11  17:42:41  jsb]
 *	Add checks for protection and inheritance arguments.
 *	[92/02/22  17:07:18  dlb@osf.org]
 *
 * Revision 2.19  92/02/23  19:51:23  elf
 *	Eliminate keep_wired argument from vm_map_copyin().
 *	[92/02/21  10:16:59  dlb]
 *
 * Revision 2.18  91/12/11  08:44:21  jsb
 *	Fixed vm_write and vm_copy to check for a null map.
 *	Fixed vm_write and vm_copy to not check for misalignment.
 *	Fixed vm_copy to discard the copy if the overwrite fails.
 *	[91/12/09  rpd]
 *
 * Revision 2.17  91/12/10  13:27:17  jsb
 *	Apply temporary NORMA_VM workaround to XMM problem.
 *	This leaks objects if vm_map() fails.
 *	[91/12/10  12:55:27  jsb]
 *
 * Revision 2.16  91/08/28  11:19:07  jsb
 *	Fixed vm_map to check memory_object with IP_VALID.
 *	Changed vm_wire to use KERN_INVALID_{HOST,TASK,VALUE}
 *	instead of a generic KERN_INVALID_ARGUMENT return code.
 *	[91/07/12  rpd]
 *
 * Revision 2.15  91/07/31  18:22:40  dbg
 *	Change vm_pageable to vm_wire. Require host_priv port to gain
 *	wiring privileges.
 *	[91/07/30  17:28:22  dbg]
 *
 * Revision 2.14  91/05/14  17:51:35  mrt
 *	Correcting copyright
 *
 * Revision 2.13  91/03/16  15:07:13  rpd
 *	Removed temporary extra stats.
 *	[91/02/10  rpd]
 *
 * Revision 2.12  91/02/05  18:00:35  mrt
 *	Changed to new Mach copyright
 *	[91/02/01  16:35:00  mrt]
 *
 * Revision 2.11  90/08/06  15:08:59  rwd
 *	Vm_read should check that the map is non null.
 *	[90/07/26  rwd]
 *
 * Revision 2.10  90/06/02  15:12:07  rpd
 *	Moved trap versions of syscalls to kern/ipc_mig.c.
 *	Removed syscall_vm_allocate_with_pager.
 *	[90/05/31  rpd]
 *
 *	Purged vm_allocate_with_pager.
 *	[90/04/09  rpd]
 *	Purged MACH_XP_FPD. Use vm_map_pageable_user for vm_pageable.
 *	Converted to new IPC kernel call semantics.
 *	[90/03/26  23:21:55  rpd]
 *
 * Revision 2.9  90/05/29  18:39:57  rwd
 *	New trap versions of exported vm calls from rfr.
 *	[90/04/20  rwd]
 *
 * Revision 2.8  90/05/03  15:53:30  dbg
 *	Set current protection to VM_PROT_DEFAULT in
 *	vm_allocate_with_pager.
 *	[90/04/12  dbg]
 *
 * Revision 2.7  90/03/14  21:11:49  rwd
 *	Get rfr bug fix.
 *	[90/03/07  rwd]
 *
 * Revision 2.6  90/02/22  20:07:02  dbg
 *	Use new vm_object_copy routines. Use new vm_map_copy
 *	technology. vm_read() no longer requires page alignment.
 *	Change PAGE_WAKEUP to PAGE_WAKEUP_DONE to reflect the fact
 *	that it clears the busy flag.
 *	[90/01/25  dbg]
 *
 * Revision 2.5  90/01/24  14:08:30  af
 *	Fixed bug in optimized vm_write: now that we relaxed the restriction
 *	on the page-alignment of the size arg we must be able to cope with
 *	e.g. one-and-a-half pages as well.
 *	Also, by simple measures on my pmax turns out that mapping is a win
 *	versus copyin even for a single page. IF you can map.
 *	[90/01/24  11:37:35  af]
 *
 * Revision 2.4  90/01/22  23:09:42  af
 *	Go through the map module for machine attributes.
 *	[90/01/20  17:23:35  af]
 *
 *	Added vm_machine_attribute(), which only invokes the
 *	corresponding pmap operation, for now. Just a first
 *	shot at it, lacks proper locking and keeping the info
 *	around, someplace.
 *	[89/12/08  af]
 *
 * Revision 2.3  90/01/19  14:36:22  rwd
 *	Disable vm_write optimization on mips since it doesn't appear to
 *	work.
 *	[90/01/19  rwd]
 *
 *	Get version that works on multiprocessor from rfr
 *	[90/01/10  rwd]
 *	Get new user copyout code from rfr.
 *	[90/01/05  rwd]
 *
 * Revision 2.2  89/09/08  11:29:05  dbg
 *	Pass keep_wired parameter to vm_map_move.
 *	[89/07/14  dbg]
 *
 * 28-Apr-89  David Golub (dbg) at Carnegie-Mellon University
 *	Changes for MACH_KERNEL:
 *	. Removed non-MACH include files and all conditionals.
 *	. Added vm_pageable, for privileged tasks only.
 *	. vm_read now uses vm_map_move to consolidate map operations.
 *	. If using FAST_PAGER_DATA, vm_write expects data to be in
 *	  current task's address space.
 *
 * Revision 2.12  89/04/18  21:30:56  mwyoung
 *	All relevant history has been integrated into the documentation below.
 *
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

#include <net_atm.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <kern/host.h>
#include <kern/task.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

vm_statistics_data_t	vm_stat;

/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t vm_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addr,
	register vm_size_t	size,
	boolean_t		anywhere)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (size == 0) {
		*addr = 0;
		return KERN_SUCCESS;
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);

	return vm_map_enter(
			map,
			addr,
			size,
			(vm_offset_t) 0,
			anywhere,
			VM_OBJECT_NULL,
			(vm_offset_t) 0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
}
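
/*
 * Illustrative usage sketch (not part of the original file): from user
 * space this routine is reached through the MIG-generated stub, which
 * names the target by its task port rather than by a vm_map_t.  "buf"
 * and "len" are hypothetical names.
 *
 *	vm_offset_t	buf;
 *	vm_size_t	len = 4 * vm_page_size;
 *	kern_return_t	kr;
 *
 *	kr = vm_allocate(mach_task_self(), &buf, len, TRUE);
 *	if (kr == KERN_SUCCESS) {
 *		... use the zero-filled memory at buf ...
 *		(void) vm_deallocate(mach_task_self(), buf, len);
 *	}
 */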

/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	if (size == (vm_size_t) 0)
		return KERN_SUCCESS;

	return vm_map_remove(map, trunc_page(start), round_page(start+size));
}
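
/*
 * Worked example (added for illustration): the rounding above means a
 * partial-page range deallocates every page it touches.  With 4K pages,
 * start = 0x1100 and size = 0x100 give trunc_page(0x1100) == 0x1000 and
 * round_page(0x1200) == 0x2000, so the entire page at 0x1000 goes away.
 * Callers keeping other data on that page must pass page-aligned ranges.
 */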

/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

#if	NET_ATM
	/*
	 * Check whether the range includes a projected buffer;
	 * the user is not allowed direct manipulation in that case.
	 */
	if (projected_buffer_in_range(map, start, start+size))
		return KERN_INVALID_ARGUMENT;
#endif	/* NET_ATM */

	return vm_map_inherit(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_inheritance);
}
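
/*
 * Illustrative usage sketch (not part of the original file): marking a
 * region VM_INHERIT_SHARE before creating a child task makes the same
 * physical pages visible in both address spaces.  "buf", "len" and
 * "child" are hypothetical names.
 *
 *	kr = vm_inherit(mach_task_self(), buf, len, VM_INHERIT_SHARE);
 *	kr = task_create(mach_task_self(), TRUE, &child);
 *	... writes to buf in either task are now visible to the other ...
 */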

/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
kern_return_t vm_protect(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (new_protection & ~VM_PROT_ALL))
		return KERN_INVALID_ARGUMENT;

#if	NET_ATM
	/*
	 * Check whether the range includes a projected buffer;
	 * the user is not allowed direct manipulation in that case.
	 */
	if (projected_buffer_in_range(map, start, start+size))
		return KERN_INVALID_ARGUMENT;
#endif	/* NET_ATM */

	return vm_map_protect(map,
			      trunc_page(start),
			      round_page(start+size),
			      new_protection,
			      set_maximum);
}
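
/*
 * Illustrative usage sketch (not part of the original file): a common
 * use is a guard page at the end of a buffer.  With set_maximum ==
 * FALSE only the current protection changes, so access can later be
 * restored up to the maximum.  "guard_addr" is a hypothetical name.
 *
 *	kr = vm_protect(mach_task_self(), guard_addr, vm_page_size,
 *			FALSE, VM_PROT_NONE);
 *	... any access to the page now faults; undo with
 *	    VM_PROT_READ|VM_PROT_WRITE ...
 */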

kern_return_t vm_statistics(
	vm_map_t		map,
	vm_statistics_data_t	*stat)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	*stat = vm_stat;

	stat->pagesize = PAGE_SIZE;
	stat->free_count = vm_page_free_count;
	stat->active_count = vm_page_active_count;
	stat->inactive_count = vm_page_inactive_count;
	stat->wire_count = vm_page_wire_count;

	return KERN_SUCCESS;
}
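
/*
 * Illustrative usage sketch (not part of the original file): all of
 * the counts are in pages, so callers scale by the reported pagesize.
 *
 *	vm_statistics_data_t	info;
 *
 *	kr = vm_statistics(mach_task_self(), &info);
 *	if (kr == KERN_SUCCESS)
 *		free_bytes = (vm_size_t) info.free_count * info.pagesize;
 */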

/*
 *	Handle machine-specific attributes for a mapping, such
 *	as cachability, migrability, etc.
 */
kern_return_t vm_machine_attribute(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t *value)	/* IN/OUT */
{
	extern kern_return_t	vm_map_machine_attribute();

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

#if	NET_ATM
	/*
	 * Check whether the range includes a projected buffer;
	 * the user is not allowed direct manipulation in that case.
	 */
	if (projected_buffer_in_range(map, address, address+size))
		return KERN_INVALID_ARGUMENT;
#endif	/* NET_ATM */

	return vm_map_machine_attribute(map, address, size, attribute, value);
}
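
/*
 * Illustrative usage sketch (not part of the original file): the
 * attribute and value names below are assumed to come from
 * <mach/vm_attributes.h>.  A task that has just written instructions
 * into a region might flush the caches for that range before executing
 * it.  "code_addr" and "code_len" are hypothetical names.
 *
 *	vm_machine_attribute_val_t	val = MATTR_VAL_CACHE_FLUSH;
 *
 *	kr = vm_machine_attribute(mach_task_self(), code_addr, code_len,
 *				  MATTR_CACHE, &val);
 */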

kern_return_t vm_read(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	pointer_t	*data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	if ((error = vm_map_copyin(map,
				address,
				size,
				FALSE,	/* src_destroy */
				&ipc_address)) == KERN_SUCCESS) {
		*data = (pointer_t) ipc_address;
		*data_size = size;
	}
	return error;
}
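
/*
 * Illustrative usage sketch (not part of the original file): through
 * the MIG stub the copied data arrives as out-of-line memory in the
 * caller's address space, which the caller must deallocate when done.
 * This is the pattern a debugger uses to inspect another task.
 *
 *	pointer_t	data;
 *	vm_size_t	count;
 *
 *	kr = vm_read(target_task, addr, size, &data, &count);
 *	if (kr == KERN_SUCCESS) {
 *		... examine count bytes at data ...
 *		(void) vm_deallocate(mach_task_self(),
 *				     (vm_offset_t) data, count);
 *	}
 */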

kern_return_t vm_write(
	vm_map_t	map,
	vm_address_t	address,
	pointer_t	data,
	vm_size_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
				     FALSE /* interruptible XXX */);
}
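
/*
 * Note (added for clarity): by the time this routine runs, "data" is
 * no longer a user pointer.  The IPC/MIG layer has already converted
 * the caller's out-of-line data into a vm_map_copy_t, which is simply
 * overwritten into place here; hence the cast above.
 */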

kern_return_t vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, source_address, size,
			   FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(map, dest_address, copy,
				   FALSE /* interruptible XXX */);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	return KERN_SUCCESS;
}
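
/*
 * Illustrative usage sketch (not part of the original file): vm_copy
 * behaves like a vm_read followed by a vm_write, but the data never
 * passes through the caller's address space.  If the overwrite fails,
 * the code above discards the intermediate copy to avoid a leak.
 *
 *	kr = vm_copy(target_task, src_addr, len, dst_addr);
 */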

/*
 *	Routine:	vm_map
 */
kern_return_t vm_map(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	ipc_port_t	memory_object,
	vm_offset_t	offset,
	boolean_t	copy,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance)
{
	register vm_object_t	object;
	register kern_return_t	result;

	if ((target_map == VM_MAP_NULL) ||
	    (cur_protection & ~VM_PROT_ALL) ||
	    (max_protection & ~VM_PROT_ALL))
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

	*address = trunc_page(*address);
	size = round_page(size);

	if (!IP_VALID(memory_object)) {
		object = VM_OBJECT_NULL;
		offset = 0;
		copy = FALSE;
	} else if ((object = vm_object_enter(memory_object, size, FALSE))
			== VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Perform the copy if requested
	 */

	if (copy) {
		vm_object_t	new_object;
		vm_offset_t	new_offset;

		result = vm_object_copy_strategically(object, offset, size,
					&new_object, &new_offset,
					&copy);

		/*
		 *	Throw away the reference to the
		 *	original object, as it won't be mapped.
		 */

		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)
			return result;

		object = new_object;
		offset = new_offset;
	}

	if ((result = vm_map_enter(target_map,
				address, size, mask, anywhere,
				object, offset,
				copy,
				cur_protection, max_protection, inheritance
				)) != KERN_SUCCESS)
		vm_object_deallocate(object);
	return result;
}
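
/*
 * Illustrative usage sketch (not part of the original file): mapping
 * memory backed by an external pager.  "pager_port" stands for a send
 * right to some memory object; copy == TRUE would request a private
 * copy rather than a shared mapping.
 *
 *	vm_offset_t	addr = 0;
 *
 *	kr = vm_map(mach_task_self(), &addr, len, 0, TRUE,
 *		    pager_port, 0, FALSE,
 *		    VM_PROT_READ, VM_PROT_READ|VM_PROT_WRITE,
 *		    VM_INHERIT_COPY);
 */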

/*
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t vm_wire(
	host_t		host,
	register vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_prot_t	access)
{
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL)
		return KERN_INVALID_ARGUMENT;

#if	NET_ATM
	/*
	 * Check whether the range includes a projected buffer;
	 * the user is not allowed direct manipulation in that case.
	 */
	if (projected_buffer_in_range(map, start, start+size))
		return KERN_INVALID_ARGUMENT;
#endif	/* NET_ATM */

	return vm_map_pageable_user(map,
				    trunc_page(start),
				    round_page(start+size),
				    access);
}
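
/*
 * Illustrative usage sketch (not part of the original file): wiring
 * requires the privileged host port, so an unprivileged task cannot
 * pin its own pages.  "host_priv" stands for that port, obtained from
 * the bootstrap environment; "buf" and "len" are hypothetical names.
 *
 *	kr = vm_wire(host_priv, mach_task_self(), buf, len,
 *		     VM_PROT_READ|VM_PROT_WRITE);
 *	... and later, to unwire:
 *	kr = vm_wire(host_priv, mach_task_self(), buf, len, VM_PROT_NONE);
 */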