/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_memory.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
	u_int kobj_ref;			/* reference count for teardown */
	struct ttm_mem_global *glob;	/* owning global accounting state */
	const char *name;		/* zone name, e.g. "kernel", "dma32" */
	uint64_t zone_mem;		/* total memory in this zone */
	uint64_t emer_mem;		/* hard limit for privileged callers */
	uint64_t max_mem;		/* soft limit for normal callers */
	uint64_t swap_limit;		/* usage above this triggers swapping */
	uint64_t used_mem;		/* memory currently accounted */
};

MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");

static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
{

	printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
	    zone->name, (unsigned long long)zone->used_mem >> 10);
	free(zone, M_TTM_ZONE);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
				 struct attribute *attr,
				 char *buffer)
{
	uint64_t val = 0;

	mtx_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	mtx_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
	    (unsigned long long) val >> 10);
}
#endif

static void ttm_check_swapping(struct ttm_mem_global *glob);

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	mtx_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	mtx_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
#endif

static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
}

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

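		/*
		 * Select the per-zone target: the swap limit when run
		 * from the swap task, the higher emergency limit for
		 * callers holding PRIV_VM_MLOCK (this port's apparent
		 * stand-in for Linux's capable(CAP_SYS_ADMIN) check),
		 * and the normal soft limit otherwise.
		 */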
		if (from_wq)
			target = zone->swap_limit;
		else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	mtx_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		mtx_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		mtx_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	mtx_unlock(&glob->lock);
}
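
/*
 * Example (sketch, not compiled here): a driver with a single swappable
 * pool could register its shrink callback roughly as follows, assuming
 * the ttm_mem_init_shrink() and ttm_mem_register_shrink() helpers from
 * ttm_memory.h; my_shrink, my_do_shrink and the swap-out helper are
 * hypothetical names.
 *
 *	static int
 *	my_do_shrink(struct ttm_mem_shrink *shrink)
 *	{
 *
 *		return (my_swap_out_one_buffer_object());
 *	}
 *
 *	ttm_mem_init_shrink(&my_shrink, my_do_shrink);
 *	(void)ttm_mem_register_shrink(glob, &my_shrink);
 */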
static void ttm_shrink_work(void *arg, int pending __unused)
{
	struct ttm_mem_global *glob = arg;

	ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
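
/*
 * Illustration of the thresholds set above: with mem = 1 GiB,
 * max_mem = 512 MiB (mem >> 1), emer_mem = 768 MiB
 * ((mem >> 1) + (mem >> 2)) and swap_limit = 384 MiB
 * (max_mem - (mem >> 3)), so swapping kicks in well before the
 * soft limit is reached.
 */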

static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		free(zone, M_TTM_ZONE);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	uint64_t mem;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
	    taskqueue_thread_enqueue, &glob->swap_queue);
	taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);

	refcount_init(&glob->kobj_ref, 1);

	mem = physmem * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
		    zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* Let the page allocator stop the shrink work first. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	taskqueue_drain(glob->swap_queue, &glob->work);
	taskqueue_free(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (refcount_release(&zone->kobj_ref))
			ttm_mem_zone_kobj_release(zone);
	}
	if (refcount_release(&glob->kobj_ref))
		ttm_mem_global_kobj_release(glob);
}

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	mtx_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		taskqueue_enqueue(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	mtx_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	mtx_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob, single_zone,
	    memory, true) != 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
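		/*
		 * Ask the shrinker for the request plus 25% headroom
		 * and a small constant, so the retry does not run into
		 * the limit again immediately.
		 */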
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
	    interruptible);
}
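
/*
 * Example (sketch): a hypothetical caller would pair the accounting
 * with its real allocation, releasing the accounting again if the
 * allocation fails; M_TTM_EXAMPLE and buf are made-up names.
 *
 *	if (ttm_mem_global_alloc(glob, 16384, false, true) == 0) {
 *		buf = malloc(16384, M_TTM_EXAMPLE, M_NOWAIT);
 *		if (buf == NULL)
 *			ttm_mem_global_free(glob, 16384);
 *	}
 */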
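/*
 * Physical page frame number of a vm_page, mirroring Linux's
 * page_to_pfn(); used by the zone checks below.
 */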
#define page_to_pfn(pp)	OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct vm_page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

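	/*
	 * With 4 KiB pages, pfn 0x00100000 is the 4 GiB boundary
	 * (0x100000 * 4096 = 2^32).  Pages above it cannot belong to
	 * the dma32 zone, so account them against the kernel zone only.
	 */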
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
	    interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
{
	struct ttm_mem_zone *zone = NULL;

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

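/*
 * Round a size up for allocation accounting: sizes that are already a
 * power of two are returned unchanged, sizes above PAGE_SIZE are
 * rounded up to a whole number of pages, and everything else is
 * rounded up to the next power of two (minimum 4).  For example, with
 * 4 KiB pages, ttm_round_pot(100) returns 128 and ttm_round_pot(5000)
 * returns 8192.
 */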
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}