/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/drm_mm.h>

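/*
 * This file implements TTM's default range manager: a simple page-range
 * allocator for a memory type (e.g. VRAM or a GTT aperture), backed by
 * a drm_mm.
 *
 * Rough usage sketch (an assumption for illustration, not part of this
 * file): a driver points a memory type at ttm_bo_manager_func from its
 * init_mem_type() hook, and ttm_bo_init_mm() then invokes the init hook
 * with the size of the range in pages, e.g.
 *
 *	man->func = &ttm_bo_manager_func;
 *	...
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */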
/**
 * The range manager lock is a default (sleepable) mtx rather than a
 * spin lock, which helps keep scheduling latency down if the range
 * manager ends up with very fragmented allocation patterns.
 */

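/*
 * Per memory type state: a drm_mm allocator covering the managed range
 * (in pages) and a mutex serializing access to it.
 */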
struct ttm_range_manager {
	struct drm_mm mm;
	struct mtx lock;
};

MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");

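/*
 * Allocate a page range for a buffer object.  drm_mm_pre_get() keeps a
 * pool of preallocated nodes so the allocation under the lock can be
 * done atomically; the loop retries when that pool has been drained by
 * a concurrent allocation.  Returning 0 with mem->mm_node left NULL
 * signals that no suitable free range exists, so the caller may try to
 * evict buffers to make room.
 */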
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		mtx_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			mtx_unlock(&rman->lock);
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		mtx_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}

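/*
 * Return a previously allocated range to the drm_mm and drop the
 * reference held in the memory registration.
 */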
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		mtx_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		mtx_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}

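/*
 * Set up the range manager for a memory type spanning p_size pages,
 * starting at page offset 0.
 */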
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		free(rman, M_TTM_RMAN);
		return ret;
	}

	mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
	man->priv = rman;
	return 0;
}

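/*
 * Tear down the range manager.  This only succeeds, and frees the
 * private state, when no allocations remain; otherwise -EBUSY is
 * returned and the manager is left intact.
 */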
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	mtx_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		mtx_unlock(&rman->lock);
		mtx_destroy(&rman->lock);
		free(rman, M_TTM_RMAN);
		man->priv = NULL;
		return 0;
	}
	mtx_unlock(&rman->lock);
	return -EBUSY;
}

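/*
 * Dump the allocator's block list to the console, prefixed with the
 * caller-supplied string.
 */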
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	mtx_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	mtx_unlock(&rman->lock);
}

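/*
 * Function table exported to drivers; a memory type that wants plain
 * range allocation sets its ttm_mem_type_manager::func to point here.
 */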
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};