/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* Simple list-based uncached page pool.
 * - Pool collects recently freed pages for reuse.
 * - Uses page->lru to keep a free list.
 * - Doesn't track pages currently in use.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#include <sys/eventhandler.h>
#include <vm/vm_pageout.h>

#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION 16
#define FREE_ALL_PAGES (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL 1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @dma32: Whether the pool holds pages allocated below 4GB.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags to pass to the page allocator.
 * @npages: Number of pages in pool.
 * @name: Name of the pool.
 * @nfrees: Statistics counter of freed pages.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
        struct mtx lock;
        bool fill_lock;
        bool dma32;
        struct pglist list;
        int ttm_page_alloc_flags;
        unsigned npages;
        char *name;
        unsigned long nfrees;
        unsigned long nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
        unsigned alloc_size;
        unsigned max_size;
        unsigned small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj_ref: Reference count of the pool manager.
 * @lowmem_handler: Event handler that shrinks the pools when the system runs
 * low on memory.
 * @options: Limits for the pools.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        unsigned int kobj_ref;
        eventhandler_tag lowmem_handler;
        struct ttm_pool_opts options;

        union {
                struct ttm_page_pool u_pools[NUM_POOLS];
                struct _utag {
                        struct ttm_page_pool u_wc_pool;
                        struct ttm_page_pool u_uc_pool;
                        struct ttm_page_pool u_wc_pool_dma32;
                        struct ttm_page_pool u_uc_pool_dma32;
                } _ut;
        } _u;
};

#define pools _u.u_pools
#define wc_pool _u._ut.u_wc_pool
#define uc_pool _u._ut.u_uc_pool
#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define uc_pool_dma32 _u._ut.u_uc_pool_dma32

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

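/*
 * Release a page obtained from the pool allocator: undo the managed and
 * fictitious markings applied at allocation time, then drop the wiring
 * and free the page back to the VM.
 */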
static void
ttm_vm_page_free(vm_page_t m)
{

        KASSERT(m->object == NULL, ("ttm page %p is owned", m));
        KASSERT(vm_page_wired(m), ("ttm lost wire %p", m));
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
        KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
        m->flags &= ~PG_FICTITIOUS;
        m->oflags |= VPO_UNMANAGED;
        vm_page_unwire_noq(m);
        vm_page_free(m);
}

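/*
 * Translate a TTM caching state into the FreeBSD VM memory attribute
 * used by pmap when mapping the page.
 */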
static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

        switch (cstate) {
        case tt_uncached:
                return (VM_MEMATTR_UNCACHEABLE);
        case tt_wc:
                return (VM_MEMATTR_WRITE_COMBINING);
        case tt_cached:
                return (VM_MEMATTR_WRITE_BACK);
        }
        panic("caching state %d\n", cstate);
}

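/*
 * Allocate one page from the physical range addressable by 32-bit DMA
 * (below 4GB), retrying a few times with contiguous-range reclamation,
 * or waiting for memory, between attempts.
 */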
static vm_page_t
ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
{
        vm_page_t p;
        int tries;

        for (tries = 0; ; tries++) {
                p = vm_page_alloc_noobj_contig(req, 1, 0, 0xffffffff, PAGE_SIZE,
                    0, memattr);
                if (p != NULL || tries > 2)
                        return (p);
                if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
                    PAGE_SIZE, 0))
                        vm_wait(NULL);
        }
}

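/*
 * Allocate one page from anywhere in physical memory, sleeping until a
 * page is available, and apply the requested memory attribute.
 */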
static vm_page_t
ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
{
        vm_page_t p;

        p = vm_page_alloc_noobj(req | VM_ALLOC_WAITOK);
        pmap_page_set_memattr(p, memattr);
        return (p);
}

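/*
 * Common allocation path: translate TTM flags into a VM allocation
 * request, take the DMA32 or unrestricted path, and mark the result as
 * a managed, fictitious TTM page.
 */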
static vm_page_t
ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
{
        vm_page_t p;
        vm_memattr_t memattr;
        int req;

        memattr = ttm_caching_state_to_vm(cstate);
        req = VM_ALLOC_WIRED;
        if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
                req |= VM_ALLOC_ZERO;

        if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
                p = ttm_vm_page_alloc_dma32(req, memattr);
        else
                p = ttm_vm_page_alloc_any(req, memattr);

        if (p != NULL) {
                p->oflags &= ~VPO_UNMANAGED;
                p->flags |= PG_FICTITIOUS;
        }
        return (p);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

        free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
                struct attribute *attr, const char *buffer, size_t size)
{
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
                struct attribute *attr, char *buffer)
{
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

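/*
 * Helpers to change the caching attribute of an array of pages. On
 * FreeBSD they reduce to pmap_page_set_memattr() calls and return 0,
 * mirroring the Linux set_memory_*() interface they stand in for; the
 * attribute change is a no-op unless TTM_HAS_AGP is defined.
 */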
static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
#endif
        return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
#endif
        return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
#endif
        return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
        unsigned i;

        /* Our VM handles vm memattr automatically on the page free. */
        if (set_pages_array_wb(pages, npages))
                printf("[TTM] Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees all pages in
 * the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        vm_page_t p, p1;
        vm_page_t *pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;
        unsigned i;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
            M_TEMP, M_WAITOK | M_ZERO);

restart:
        mtx_lock(&pool->lock);

        TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        for (i = 0; i < freed_pages; i++)
                                TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        mtx_unlock(&pool->lock);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* If there is more to free, restart the processing. */
                        if (nr_free)
                                goto restart;

                        /*
                         * Not allowed to fall through or break because the
                         * following context expects the pool lock to be
                         * held, while we have already dropped it here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                for (i = 0; i < freed_pages; i++)
                        TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        mtx_unlock(&pool->lock);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        free(pages_to_free, M_TEMP);
        return nr_free;
}

/* Return a good estimate of the number of free pages in the pools. */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for the VM to request that the pools reduce the number of
 * pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
        static unsigned int start_pool = 0;
        unsigned i;
        unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
        struct ttm_page_pool *pool;
        int shrink_pages = 100; /* XXXKIB */

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

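/*
 * Register/deregister the pool shrinker on the vm_lowmem event so the
 * pools release pages when the system is short on memory.
 */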
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

        manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
            ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

        EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        printf("[TTM] Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        printf("[TTM] Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change caching state, removing them from
 * the pages list. Pages whose caching state was changed successfully stay
 * on the list and are kept by the caller.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                vm_page_t *failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
                ttm_vm_page_free(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t *caching_array;
        vm_page_t p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

        /* allocate array for page caching change */
        caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
            M_WAITOK | M_ZERO);

        for (i = 0, cpages = 0; i < count; ++i) {
                p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
                if (!p) {
                        printf("[TTM] Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                    cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                            ttm_flags, cstate,
                                            caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM /* KIB: nop */
                /* The gfp flags of a highmem page should never be dma32, so
                 * we should be fine in such a case.
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                    cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                            ttm_flags, cstate,
                                            caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                TAILQ_INSERT_HEAD(pages, p, plinks.q);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                            ttm_flags, cstate,
                            caching_array, cpages);
        }
out:
        free(caching_array, M_TEMP);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, new
         * pages are allocated from outside of the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct pglist new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                mtx_unlock(&pool->lock);

                TAILQ_INIT(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
                    ttm_flags, cstate, alloc_size);
                mtx_lock(&pool->lock);

                if (!r) {
                        TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        printf("[TTM] Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left, put them into the pool. */
                        TAILQ_FOREACH(p, &new_pages, plinks.q) {
                                ++cpages;
                        }
                        TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct pglist *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        vm_page_t p;
        unsigned i;

        mtx_lock(&pool->lock);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                TAILQ_CONCAT(pages, &pool->list, plinks.q);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        for (i = 0; i < count; i++) {
                p = TAILQ_FIRST(&pool->list);
                TAILQ_REMOVE(&pool->list, p, plinks.q);
                TAILQ_INSERT_TAIL(pages, p, plinks.q);
        }
        pool->npages -= count;
        count = 0;
out:
        mtx_unlock(&pool->lock);
        return count;
}

/* Put all pages on the pages list into the correct pool to wait for reuse. */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                ttm_vm_page_free(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        mtx_lock(&pool->lock);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        mtx_unlock(&pool->lock);
        if (npages)
                ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold count number of correctly cached
 * pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct pglist plist;
        vm_page_t p = NULL;
        int gfp_flags;
        unsigned count;
        int r;

        /* No pool for cached pages */
        if (pool == NULL) {
                for (r = 0; r < npages; ++r) {
                        p = ttm_vm_page_alloc(flags, cstate);
                        if (!p) {
                                printf("[TTM] Unable to allocate page\n");
                                return -ENOMEM;
                        }
                        pages[r] = p;
                }
                return 0;
        }

        /* combine the zero flag with the pool flags */
        gfp_flags = flags | pool->ttm_page_alloc_flags;

        /* First we take pages from the pool */
        TAILQ_INIT(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        TAILQ_FOREACH(p, &plist, plinks.q) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                TAILQ_FOREACH(p, &plist, plinks.q) {
                        pmap_zero_page(p);
                }
        }

        /* If the pool didn't have enough pages, allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference the pool so we can
                 * run multiple requests in parallel.
                 **/
                TAILQ_INIT(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
                    npages);
                TAILQ_FOREACH(p, &plist, plinks.q) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages in the list, put them back
                         * into the pool. */
                        printf("[TTM] Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}

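/* Initialize a single pool: its lock, empty free list, counters and name. */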
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                                      char *name)
{
        mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
        pool->fill_lock = false;
        TAILQ_INIT(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->ttm_page_alloc_flags = flags;
        pool->name = name;
}

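/*
 * Create the global pool manager with its four pools (wc and uc, each
 * with a dma32 variant), set the pool limits, and register the lowmem
 * shrinker.
 */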
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

        if (_manager != NULL)
                printf("[TTM] manager != NULL\n");
        printf("[TTM] Initializing pool allocator\n");

        _manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

        ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
        ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "wc dma");
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        refcount_init(&_manager->kobj_ref, 1);
        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

void ttm_page_alloc_fini(void)
{
        int i;

        printf("[TTM] Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        if (refcount_release(&_manager->kobj_ref))
                ttm_pool_kobj_release(_manager);
        _manager = NULL;
}

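/*
 * Back every page slot of a ttm_tt with an allocated page, accounting
 * each page against the global memory limit; on any failure the object
 * is unpopulated again before returning.
 */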
int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}

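/*
 * Return all pages of a ttm_tt to their pool (or free them if there is
 * no pool) and release the global memory accounting taken in
 * ttm_pool_populate().
 */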
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
#endif