/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  The
 * allocation is always split into PAGE_SIZE chunks, so that large
 * buffers do not require physically contiguous memory.
 */
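
/*
 * Usage sketch (hypothetical caller; names like "nent" and
 * "entry_size" are illustrative only):
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, nent * entry_size, PAGE_SIZE, &buf);
 *	if (err)
 *		return (err);
 *	... use buf.direct.buf, hand buf.page_list to the firmware ...
 *	mlx5_buf_free(dev, &buf);
 */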

static void
mlx5_buf_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mlx5_buf *buf;
	uint8_t owned;
	int x;

	buf = (struct mlx5_buf *)arg;

	/*
	 * bus_dmamap_load() may invoke this callback synchronously, in
	 * which case the DMA lock is already held by the caller:
	 */
	owned = MLX5_DMA_OWNED(buf->dev);
	if (!owned)
		MLX5_DMA_LOCK(buf->dev);

	if (error == 0) {
		/* store the bus address of every page */
		for (x = 0; x != nseg; x++) {
			buf->page_list[x] = segs[x].ds_addr;
			KASSERT(segs[x].ds_len == PAGE_SIZE,
			    ("Invalid segment size"));
		}
		buf->load_done = MLX5_LOAD_ST_SUCCESS;
	} else {
		buf->load_done = MLX5_LOAD_ST_FAILURE;
	}
	/* wake up any thread sleeping in MLX5_DMA_WAIT() */
	MLX5_DMA_DONE(buf->dev);

	if (!owned)
		MLX5_DMA_UNLOCK(buf->dev);
}
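
/*
 * Allocate "size" bytes of DMA memory as a list of PAGE_SIZE pages
 * and store the bus address of each page in buf->page_list.  Returns
 * zero on success or a negative errno on failure.  Note that the
 * "max_direct" argument is not used by this implementation; the
 * allocation is always split into pages.
 */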
int
mlx5_buf_alloc(struct mlx5_core_dev *dev, int size,
    int max_direct, struct mlx5_buf *buf)
{
	int err;

	buf->npages = howmany(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->load_done = MLX5_LOAD_ST_NONE;
	buf->dev = dev;
	/* under LinuxKPI a GFP_KERNEL allocation sleeps and cannot fail */
	buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),
	    GFP_KERNEL);

	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    PAGE_SIZE,			/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE * buf->npages,	/* maxsize */
	    buf->npages,		/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &buf->dma_tag);

	if (err != 0)
		goto err_dma_tag;

	/* allocate memory */
	err = -bus_dmamem_alloc(buf->dma_tag, &buf->direct.buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &buf->dma_map);
	if (err != 0)
		goto err_dma_alloc;

	/* load memory into DMA */
	MLX5_DMA_LOCK(dev);
	err = bus_dmamap_load(
	    buf->dma_tag, buf->dma_map, buf->direct.buf,
	    PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb,
	    buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

	/* wait for the load callback to run; it may be deferred */
	while (buf->load_done == MLX5_LOAD_ST_NONE)
		MLX5_DMA_WAIT(dev);
	MLX5_DMA_UNLOCK(dev);

	/* check for error */
	if (buf->load_done != MLX5_LOAD_ST_SUCCESS) {
		err = -ENOMEM;
		goto err_dma_load;
	}

	/* clean memory */
	memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages);

	/* flush memory to RAM */
	bus_dmamap_sync(buf->dma_tag, buf->dma_map, BUS_DMASYNC_PREWRITE);
	return (0);

err_dma_load:
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
err_dma_alloc:
	bus_dma_tag_destroy(buf->dma_tag);
err_dma_tag:
	kfree(buf->page_list);
	return (err);
}

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{

	bus_dmamap_unload(buf->dma_tag, buf->dma_map);
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
	bus_dma_tag_destroy(buf->dma_tag);
	kfree(buf->page_list);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
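
/*
 * Doorbell records are allocated in pages: each "pgdir" owns one
 * firmware page that is carved into MLX5_DB_PER_PAGE doorbell
 * records.  A bitmap tracks the free records within the page (a set
 * bit means the record is free), and all pgdirs are linked on
 * dev->priv.pgdir_list under dev->priv.pgdir_mutex.
 */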
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);

	/* mark all doorbell records in the new page as free */
	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

	pgdir->fw_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (pgdir->fw_page != NULL) {
		pgdir->db_page = pgdir->fw_page->virt_addr;
		pgdir->db_dma = pgdir->fw_page->dma_addr;

		/* clean allocated memory */
		memset(pgdir->db_page, 0, MLX5_ADAPTER_PAGE_SIZE);

		/* flush memory to RAM */
		mlx5_fwp_flush(pgdir->fw_page);
	}
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index = i;
	/* doorbell records are spaced one cache line apart */
	offset = db->index * L1_CACHE_BYTES;
	db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma = pgdir->db_dma + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	/* first try to grab a free record from an existing page */
	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	/* release the page when the last doorbell record is freed */
	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		mlx5_fwp_free(db->u.pgdir->fw_page);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
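
/*
 * Convert the bus address of every page backing "buf" to big-endian
 * and store it in the "pas" array, which the caller must size for at
 * least buf->npages entries.
 */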
void
mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	int i;

	for (i = 0; i != buf->npages; i++)
		pas[i] = cpu_to_be64(buf->page_list[i]);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
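
/*
 * Usage sketch (hypothetical; the command layout shown is purely
 * illustrative): the page array is typically embedded in a firmware
 * command input and filled right after the buffer is allocated:
 *
 *	__be64 *pas = MLX5_ADDR_OF(create_cq_in, in, pas);
 *	mlx5_fill_page_array(&cq->buf, pas);
 */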