/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks pages that are currently in use.
 * - Tracks whether the page is UC, WC or cached (and reverts to WB
 *   when freed).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define IS_UNDEFINED	(0)
#define IS_WC		(1<<1)
#define IS_UC		(1<<2)
#define IS_CACHED	(1<<3)
#define IS_DMA32	(1<<4)

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 * - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 * it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page that keeps track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

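/* kobject release callback: frees the pool manager allocated in ttm_dma_page_alloc_init(). */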
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

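/*
 * sysfs store handler: parses a value in KB, converts it to a page count and
 * updates the corresponding pool limit.
 */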
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

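/* sysfs show handler: reports the selected pool limit, converted from pages back to KB. */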
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

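/* Apply the caching attribute implied by the pool type to an array of pages. */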
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

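/* Release one page of coherent DMA memory along with its accounting structure. */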
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;
	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}

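/* Allocate one page of coherent DMA memory plus the dma_page accounting struct for it. */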
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

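/* Translate TTM page flags and caching state into the matching pool type. */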
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

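/* Adjust the free/freed counters; the caller must hold pool->lock. */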
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; if set to FREE_ALL_PAGES, all pages in
 *   the pool are freed.
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
				GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

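/*
 * Tear down the pool of the given type that belongs to 'dev': drop its entry
 * from the global list, free all of its pages and unlink it from the device.
 */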
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

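/* devres match callback: true when the devres entry refers to 'match_data'. */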
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

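/*
 * Create a DMA pool of the given type for 'dev', add it to both the global
 * and the per-device pool lists, and tie its lifetime to the device via
 * devres.
 */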
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

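/* Look up an existing pool of the given type on the device's dma_pools list; returns NULL if none. */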
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and call the dev_res destructors: ttm_dma_pool_release. The nice
	 * thing is at that point in time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. If there are
 * pages that have already changed their caching state, put them back into
 * the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages, adding each successfully allocated page to the
 * 'd_pages' list and setting its caching state to match the pool type.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
				  (unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list will hold ttm->num_pages correctly cached pages.
 * On failure a negative error code (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {
			return -ENOMEM;
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Get a good estimate of how many pages are free in the pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC number of pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return estimated number of unused pages in pool */
	return ttm_dma_pool_get_num_unused_pages();
}

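/*
 * Hook the pool manager up to the VM shrinker so that free pages can be
 * reclaimed when the system is under memory pressure.
 */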
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

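/*
 * Allocate the global pool manager, set its default limits and expose it
 * through sysfs and the shrinker.
 */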
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

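/*
 * Undo ttm_dma_page_alloc_init(): unregister the shrinker and free every
 * remaining per-device pool.
 */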
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

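/* Dump per-pool statistics through a seq_file (used by the debugfs interface). */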
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
			   pool->npages_free,
			   pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);