/*-
 * Copyright (c) 2013-2020, Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

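/*
 * Ask firmware for a UAR (User Access Region) and return its index in
 * @uarn.  Returns 0 on success or a negative errno from the command
 * interface.
 */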
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

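/*
 * Return the UAR with index @uarn to firmware.
 */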
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

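/*
 * Number of UARs that share one system page: with the uar_4k
 * capability several 4KB device UARs are packed into a single
 * (possibly larger) host page; without it each UAR occupies a page of
 * its own.
 */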
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

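/*
 * Translate a UAR index into the page frame number of the backing
 * register space in PCI BAR 0.  With 4KB UARs, several consecutive
 * indices land in the same system page.
 */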
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
}

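/*
 * kref release callback: unlink the page from its list, unmap it,
 * return the UAR to firmware and free the bookkeeping.  Runs with the
 * owning list's lock held, since every kref_put() in this file is made
 * under that lock.
 */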
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

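/*
 * Allocate and map one UARs page: get a UAR index from firmware, map
 * the backing page (write-combining if @map_wc) and split its doorbell
 * slots into regular and fast-path blue flame registers, tracked by
 * the two bitmaps.  A failed write-combining mapping returns -EAGAIN
 * so the caller may retry without write combining.
 */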
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->fp_bitmap)
		goto error1;

	/* The first slots of each UAR are regular doorbells; the rest
	 * are fast-path blue flame slots.
	 */
	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

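/*
 * Return a reference to the first regular (non write-combining) UARs
 * page, allocating one on first use.  Callers pair this with
 * mlx5_put_uars_page().
 */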
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

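/*
 * Drop a reference taken by mlx5_get_uars_page().  The last put
 * releases the page via up_rel_func(); the lock is held across
 * kref_put() because the release path unlinks the page from the list.
 */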
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* return the offset in bytes from the start of the page to the
	 * blue flame area of the UAR
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

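/*
 * Carve one blue flame register out of a UARs page on the matching
 * list (write-combining or regular), allocating a fresh page when the
 * list is empty.  Once the last available slot of the requested kind
 * is taken, the page is removed from the list so it is no longer
 * offered to new allocations; mlx5_free_bfreg() re-adds it.
 */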
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock; /* pointer to right mutex */
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

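/*
 * Allocate a blue flame register for a send queue.  A write-combining
 * mapping may fail with -EAGAIN, in which case the allocation falls
 * back to a regular mapping.
 *
 * Typical usage (a sketch; the surrounding driver context and error
 * handling are assumed):
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err;
 *
 *	err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
 *	if (err)
 *		return err;
 *	... create the SQ with bfreg.index and ring doorbells through
 *	    bfreg.map ...
 *	mlx5_free_bfreg(mdev, &bfreg);
 */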
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

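/*
 * Recover the doorbell index within the system page from the mapped
 * address: which 4KB UAR inside the page the register belongs to, and
 * which blue flame register it is within that UAR.
 */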
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

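/*
 * Return a blue flame register to its UARs page: mark the slot free
 * again, put the page back on the free list when it goes from full to
 * having one free slot, and drop the page reference taken at
 * allocation time.
 */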
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock; /* pointer to right mutex */
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);