sys/vm/memguard.c
/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/vm/memguard.c 254307 2013-08-13 22:40:43Z jeff $");

/*
 * MemGuard is a simple replacement allocator, for debugging only,
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated and is used to detect tamper-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
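
/*
 * A sketch of typical MemGuard usage (see memguard(9)); not part of
 * this file.  MemGuard is compiled into the kernel with
 *
 *	options DEBUG_MEMGUARD		(kernel configuration file)
 *
 * and pointed at a malloc(9) type by its short description, either
 * from the loader or at runtime (the type name is an example):
 *
 *	vm.memguard.desc="subproc"	(/boot/loader.conf)
 */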

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If mtp is NULL, it will be initialized in memguard_cmp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
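
/*
 * Example of retargeting MemGuard at runtime (the type name is an
 * example; any malloc(9) short description may be used):
 *
 *	# sysctl vm.memguard.desc=subproc
 *
 * Setting an empty string effectively stops matching by type.
 */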

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD_AROUND		0x001
#define	MG_GUARD_ALLLARGE	0x002
#define	MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
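
/*
 * The frequency is expressed per 100000 allocations; e.g.
 *
 *	# sysctl vm.memguard.frequency=1000
 *
 * guards roughly 1% (1000/100000) of eligible allocations at random,
 * independently of any type selected via vm.memguard.desc.
 */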

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if the one provided is unusable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (size must be multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}
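
/*
 * A worked example of the sizing above (numbers illustrative only):
 * with 1 GB of RAM (262144 pages of 4 KB each) and the default
 * divisor of 10,
 *
 *	memguard_physlimit = (262144 / 10) * 4096	(about 102 MB)
 *	memguard_mapsize   = round_page(parent_size / 10)
 *
 * When the parent map is small relative to RAM, the KVA limit rather
 * than the physical limit is what binds first.
 */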

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_base, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_mapsize, "MemGuard KVA size");
#if 0
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused while the page is
 * wired, rather than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}
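
/*
 * For reference, `plinks' is a union in struct vm_page; a sketch of
 * the relevant members as of this release (see vm_page.h for the
 * authoritative definition):
 *
 *	union {
 *		TAILQ_ENTRY(vm_page) q;	(page queue or free list)
 *		...
 *		struct {
 *			u_long p;	(MemGuard: original request size)
 *			u_long v;	(MemGuard: size of KVA reservation)
 *		} memguard;
 *	} plinks;
 *
 * A wired page sits on no page queue, so MemGuard can borrow this
 * space without clashing with the queue linkage.
 */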

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we avoid recycling KVA for as long
	 * as possible.  This is not perfect, since we do not know in
	 * what order previous allocations will be freed, but it is
	 * simple and fast, and requires O(1) additional storage if
	 * guard pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &addr) == 0)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kmem_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, addr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}
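
/*
 * Resulting layout for a guarded sub-page request (addresses grow to
 * the right; only the middle pages are backed by physical memory):
 *
 *	+------------+----------------------------+------------+
 *	| guard page | ... pad ... |  user data   | guard page |
 *	+------------+----------------------------+------------+
 *	                           ^
 *	                           returned pointer, placed near the
 *	                           end of the page so that an overrun
 *	                           faults on the guard page quickly
 */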

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}
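
/*
 * Sketch of how malloc(9) and free(9) hand off to MemGuard (condensed
 * from kern_malloc.c under DEBUG_MEMGUARD; details of the surrounding
 * code may differ):
 *
 *	void *
 *	malloc(unsigned long size, struct malloc_type *mtp, int flags)
 *	{
 *		...
 *		if (memguard_cmp_mtp(mtp, size)) {
 *			va = memguard_alloc(size, flags);
 *			if (va != NULL)
 *				return (va);
 *		}
 *		...
 *	}
 *
 *	void
 *	free(void *addr, struct malloc_type *mtp)
 *	{
 *		...
 *		if (is_memguard_addr(addr)) {
 *			memguard_free(addr);
 *			return;
 *		}
 *		...
 *	}
 */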

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (a second thread freeing the same object
	 * might not otherwise write to it before freeing; this forces
	 * the write, which then faults on the already-unmapped page).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kmem_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and a new memory type was allocated
	 *    at the same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    (zone->uz_flags & UMA_ZONE_NOFREE) != 0)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest way of comparison is to always compare the zone
	 * name, but it is also the slowest way.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}
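
/*
 * Sketch of the corresponding uma(9) hook (condensed from uma_core.c
 * under DEBUG_MEMGUARD; details may differ):
 *
 *	if (memguard_cmp_zone(zone)) {
 *		item = memguard_alloc(zone->uz_size, flags);
 *		if (item != NULL) {
 *			if (zone->uz_init != NULL)
 *				zone->uz_init(item, zone->uz_size, flags);
 *			return (item);
 *		}
 *	}
 */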