/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2021 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void
irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
        struct irdma_chunk *chunk;
        struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

        while (!list_empty(&pinfo->clist)) {
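                /*
                 * The cast below relies on the list node being the first
                 * member of struct irdma_chunk, so the first list element
                 * points directly at the containing chunk.
                 */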
                chunk = (struct irdma_chunk *)(&pinfo->clist)->next;
                list_del(&chunk->list);
                if (chunk->type == PBLE_SD_PAGED)
                        irdma_pble_free_paged_mem(chunk);
                if (chunk->bitmapbuf)
                        irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
                kfree(chunk->chunkmem.va);
        }
        spin_lock_destroy(&pinfo->prm_lock);
        mutex_destroy(&pble_rsrc->pble_mutex_lock);
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
int
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
                    struct irdma_hmc_pble_rsrc *pble_rsrc)
{
        struct irdma_hmc_info *hmc_info;
        u32 fpm_idx = 0;
        int status = 0;

        hmc_info = dev->hmc_info;
        pble_rsrc->dev = dev;
        pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
        /*
         * Start PBLEs on a 4K boundary: skip the 8-byte PBLEs between the
         * FPM base and the next 4K-aligned address.
         */
        if (pble_rsrc->fpm_base_addr & 0xfff)
                fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
        pble_rsrc->unallocated_pble =
            hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
        pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
        pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

        mutex_init(&pble_rsrc->pble_mutex_lock);

        spin_lock_init(&pble_rsrc->pinfo.prm_lock);
        INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
        if (add_pble_prm(pble_rsrc)) {
                irdma_destroy_pble_prm(pble_rsrc);
                status = -ENOMEM;
        }

        return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void
get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
              struct sd_pd_idx *idx)
{
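        /*
         * sd_idx: which segment descriptor (IRDMA_HMC_DIRECT_BP_SIZE bytes)
         * the next FPM address falls in; pd_idx: which page descriptor
         * (IRDMA_HMC_PAGED_BP_SIZE bytes); rel_pd_idx: that page's index
         * within its SD.
         */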
        idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
        idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
        idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static int
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
              struct irdma_add_page_info *info)
{
        struct irdma_sc_dev *dev = pble_rsrc->dev;
        int ret_code = 0;
        struct sd_pd_idx *idx = &info->idx;
        struct irdma_chunk *chunk = info->chunk;
        struct irdma_hmc_info *hmc_info = info->hmc_info;
        struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
        u32 offset = 0;

        if (!sd_entry->valid) {
                ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
                                                    info->idx.sd_idx,
                                                    IRDMA_SD_TYPE_DIRECT,
                                                    IRDMA_HMC_DIRECT_BP_SIZE);
                if (ret_code)
                        return ret_code;

                chunk->type = PBLE_SD_CONTIGOUS;
        }

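        /*
         * Locate the chunk within the direct SD's backing memory: the
         * relative PD index shifted by the paged BP shift gives the byte
         * offset, and info->pages gives the chunk size in 4KB pages.
         */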
        offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
        chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
        chunk->vaddr = (u8 *)sd_entry->u.bp.addr.va + offset;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        irdma_debug(dev, IRDMA_DEBUG_PBLE,
                    "chunk_size[%ld] = 0x%lx vaddr=0x%p fpm_addr = %lx\n",
                    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

        return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32
fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
        u64 idx;

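        /* FPM addresses are byte addresses; each PBLE is 8 bytes */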
        idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

        return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static int
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
             struct irdma_add_page_info *info)
{
        struct irdma_sc_dev *dev = pble_rsrc->dev;
        u8 *addr;
        struct irdma_dma_mem mem;
        struct irdma_hmc_pd_entry *pd_entry;
        struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
        struct irdma_hmc_info *hmc_info = info->hmc_info;
        struct irdma_chunk *chunk = info->chunk;
        int status = 0;
        u32 rel_pd_idx = info->idx.rel_pd_idx;
        u32 pd_idx = info->idx.pd_idx;
        u32 i;

        if (irdma_pble_get_paged_mem(chunk, info->pages))
                return -ENOMEM;

        status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
                                          IRDMA_SD_TYPE_PAGED,
                                          IRDMA_HMC_DIRECT_BP_SIZE);
        if (status)
                goto error;

        addr = chunk->vaddr;
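        /*
         * Register each 4KB backing page of the chunk as a PD entry in the
         * paged SD, skipping slots whose PD entry is already valid.
         */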
        for (i = 0; i < info->pages; i++) {
                mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
                mem.size = 4096;
                mem.va = addr;
                pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
                if (!pd_entry->valid) {
                        status = irdma_add_pd_table_entry(dev, hmc_info,
                                                          pd_idx++, &mem);
                        if (status)
                                goto error;

                        addr += 4096;
                }
        }

        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        return 0;

error:
        irdma_pble_free_paged_mem(chunk);

        return status;
}

/**
 * irdma_get_type - get the sd entry type for the sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type
irdma_get_type(struct irdma_sc_dev *dev,
               struct sd_pd_idx *idx, u32 pages)
{
        enum irdma_sd_entry_type sd_entry_type;

        sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
                        IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
        return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
        struct irdma_sc_dev *dev = pble_rsrc->dev;
        struct irdma_hmc_sd_entry *sd_entry;
        struct irdma_hmc_info *hmc_info;
        struct irdma_chunk *chunk;
        struct irdma_add_page_info info;
        struct sd_pd_idx *idx = &info.idx;
        int ret_code = 0;
        enum irdma_sd_entry_type sd_entry_type;
        u64 sd_reg_val = 0;
        struct irdma_virt_mem chunkmem;
        u32 pages;

        if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
                return -ENOMEM;

        if (pble_rsrc->next_fpm_addr & 0xfff)
                return -EINVAL;

        chunkmem.size = sizeof(*chunk);
        chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
        if (!chunkmem.va)
                return -ENOMEM;

        chunk = chunkmem.va;
        chunk->chunkmem = chunkmem;
        hmc_info = dev->hmc_info;
        chunk->dev = dev;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        get_sd_pd_idx(pble_rsrc, idx);
        sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
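        /*
         * Size the chunk to the remainder of the current SD (up to
         * IRDMA_HMC_PD_CNT_IN_SD pages), capped by the number of 4KB pages
         * the remaining unallocated PBLEs can fill (512 PBLEs per page).
         */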
        pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
                IRDMA_HMC_PD_CNT_IN_SD;
        pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
        info.chunk = chunk;
        info.hmc_info = hmc_info;
        info.pages = pages;
        info.sd_entry = sd_entry;
        if (!sd_entry->valid)
                sd_entry_type = irdma_get_type(dev, idx, pages);
        else
                sd_entry_type = sd_entry->entry_type;

        irdma_debug(dev, IRDMA_DEBUG_PBLE,
                    "pages = %d, unallocated_pble[%d] current_fpm_addr = %lx\n",
                    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
        irdma_debug(dev, IRDMA_DEBUG_PBLE, "sd_entry_type = %d\n",
                    sd_entry_type);
        if (sd_entry_type == IRDMA_SD_TYPE_DIRECT) {
                ret_code = add_sd_direct(pble_rsrc, &info);
                if (ret_code)
                        sd_entry_type = IRDMA_SD_TYPE_PAGED;
                else
                        pble_rsrc->stats_direct_sds++;
        }

        if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
                ret_code = add_bp_pages(pble_rsrc, &info);
                if (ret_code)
                        goto error;
                else
                        pble_rsrc->stats_paged_sds++;
        }

        ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
        if (ret_code)
                goto error;

        pble_rsrc->next_fpm_addr += chunk->size;
        irdma_debug(dev, IRDMA_DEBUG_PBLE,
                    "next_fpm_addr = %lx chunk_size[%lu] = 0x%lx\n",
                    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
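        /*
         * sd_reg_val is the physical address to program for the SD: the PD
         * table page for a paged SD, or the backing page for a direct SD.
         * It is only committed via irdma_hmc_sd_one() when the SD entry is
         * not yet valid.
         */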
        sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
                     sd_entry->u.pd_table.pd_page_addr.pa :
                     sd_entry->u.bp.addr.pa;
        if (!sd_entry->valid) {
                ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
                                            idx->sd_idx, sd_entry->entry_type, true);
                if (ret_code)
                        goto error;
        }

        sd_entry->valid = true;
        list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        return 0;

error:
        if (chunk->bitmapbuf)
                irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
        kfree(chunk->chunkmem.va);

        return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void
free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
          struct irdma_pble_alloc *palloc)
{
        u32 i;
        struct irdma_pble_level2 *lvl2 = &palloc->level2;
        struct irdma_pble_info *root = &lvl2->root;
        struct irdma_pble_info *leaf = lvl2->leaf;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                if (leaf->addr)
                        irdma_prm_return_pbles(&pble_rsrc->pinfo,
                                               &leaf->chunkinfo);
                else
                        break;
        }

        if (root->addr)
                irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

        kfree(lvl2->leafmem.va);
        lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
              struct irdma_pble_alloc *palloc)
{
        u32 lf4k, lflast, total, i;
        u32 pblcnt = PBLE_PER_PAGE;
        u64 *addr;
        struct irdma_pble_level2 *lvl2 = &palloc->level2;
        struct irdma_pble_info *root = &lvl2->root;
        struct irdma_pble_info *leaf;
        int ret_code;
        u64 fpm_addr;

        /* number of full 512-PBLE (4KB) leaves */
        lf4k = palloc->total_cnt >> 9;
        lflast = palloc->total_cnt % PBLE_PER_PAGE;
        total = (lflast == 0) ? lf4k : lf4k + 1;
        lvl2->leaf_cnt = total;

        lvl2->leafmem.size = (sizeof(*leaf) * total);
        lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
        if (!lvl2->leafmem.va)
                return -ENOMEM;

        lvl2->leaf = lvl2->leafmem.va;
        leaf = lvl2->leaf;
        ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
                                       total << 3, &root->addr, &fpm_addr);
        if (ret_code) {
                kfree(lvl2->leafmem.va);
                lvl2->leaf = NULL;
                return -ENOMEM;
        }

        root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
        root->cnt = total;
        addr = root->addr;
        for (i = 0; i < total; i++, leaf++) {
                pblcnt = (lflast && ((i + 1) == total)) ?
                         lflast : PBLE_PER_PAGE;
                ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
                                               &leaf->chunkinfo, pblcnt << 3,
                                               &leaf->addr, &fpm_addr);
                if (ret_code)
                        goto error;

                leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

                leaf->cnt = pblcnt;
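                /* the root page stores the PBLE index of each leaf */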
                *addr = (u64)leaf->idx;
                addr++;
        }

        palloc->level = PBLE_LEVEL_2;
        pble_rsrc->stats_lvl2++;
        return 0;

error:
        free_lvl2(pble_rsrc, palloc);

        return -ENOMEM;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
              struct irdma_pble_alloc *palloc)
{
        int ret_code;
        u64 fpm_addr;
        struct irdma_pble_info *lvl1 = &palloc->level1;

        ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
                                       palloc->total_cnt << 3, &lvl1->addr,
                                       &fpm_addr);
        if (ret_code)
                return -ENOMEM;

        palloc->level = PBLE_LEVEL_1;
        lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
        lvl1->cnt = palloc->total_cnt;
        pble_rsrc->stats_lvl1++;

        return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @level1_only: flag for a level 1 PBLE
 */
static int
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
                   struct irdma_pble_alloc *palloc, bool level1_only)
{
        int status = 0;

        status = get_lvl1_pble(pble_rsrc, palloc);
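        /*
         * Return if the level 1 allocation succeeded, if the caller only
         * wants level 1, or if the request fits in a single page so a
         * level 2 structure would not help.
         */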
        if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
                return status;

        status = get_lvl2_pble(pble_rsrc, palloc);

        return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: #of pbles requested
 * @level1_only: true if only pble level 1 to acquire
 */
int
irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
               struct irdma_pble_alloc *palloc, u32 pble_cnt,
               bool level1_only)
{
        int status = 0;
        int max_sds = 0;
        int i;

        palloc->total_cnt = pble_cnt;
        palloc->level = PBLE_LEVEL_0;

        mutex_lock(&pble_rsrc->pble_mutex_lock);

        /*
         * check first to see if we can get PBLEs without acquiring
         * additional SDs
         */
        status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
        if (!status)
                goto exit;

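        /*
         * Could not satisfy the request from existing chunks; grow the PRM
         * one SD at a time. A direct SD maps 2MB, i.e. 256K (1 << 18)
         * 8-byte PBLEs, so total_cnt >> 18 full SDs plus one more bound
         * the number of attempts.
         */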
        max_sds = (palloc->total_cnt >> 18) + 1;
        for (i = 0; i < max_sds; i++) {
                status = add_pble_prm(pble_rsrc);
                if (status)
                        break;

                status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
                /* if level1_only, only go through it once */
                if (!status || level1_only)
                        break;
        }

exit:
        if (!status) {
                pble_rsrc->allocdpbles += pble_cnt;
                pble_rsrc->stats_alloc_ok++;
        } else {
                pble_rsrc->stats_alloc_fail++;
        }
        mutex_unlock(&pble_rsrc->pble_mutex_lock);

        return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void
irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
                struct irdma_pble_alloc *palloc)
{
        pble_rsrc->freedpbles += palloc->total_cnt;

        if (palloc->level == PBLE_LEVEL_2)
                free_lvl2(pble_rsrc, palloc);
        else
                irdma_prm_return_pbles(&pble_rsrc->pinfo,
                                       &palloc->level1.chunkinfo);
        pble_rsrc->stats_alloc_freed++;
}