/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h>		/* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */
/* M_NETMAP is only used here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM	8*4096	/* larger values take too long to allocate */
#else
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#endif
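
/*
 * With the defaults above, NETMAP_BUF_MAX_NUM is 8*4096 = 32768 buffers
 * in the small-memory case and 20*4096*2 = 163840 buffers otherwise.
 */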

#define NETMAP_POOL_MAX_NAMSZ	32

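/*
 * Each allocator manages three object pools: one for the netmap_if
 * descriptors exported to userspace, one for the netmap rings and one
 * for the packet buffers. The enum below indexes them.
 */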
enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};

struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)             */
	size_t memtotal;	/* actual total memory space */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */

	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */

	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

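/*
 * Method table implemented by each allocator backend. The exported
 * netmap_mem_*() wrappers below simply take the allocator lock and
 * dispatch through these pointers.
 */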
struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *, struct netmap_adapter *);
	void (*nmd_deref)(struct netmap_mem_d *, struct netmap_adapter *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_mem_d *,
			struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_mem_d *,
			struct netmap_adapter *);
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
#define NETMAP_MEM_NOMAP	0x10	/* do not map/unmap pdevs */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
		u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}

vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(nmd, na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(nmd, na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(nmd, na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(nmd, na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_check_group(struct netmap_mem_d *, bus_dma_tag_t);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
static NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_check_group(nmd, na->pdev)) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);

	if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}
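
/*
 * Bitmap arithmetic, for reference: each uint32_t word covers 32
 * objects, so e.g. nm_isset(bitmap, 37) tests bit 37 & 31 = 5 of
 * bitmap[37 >> 5] = bitmap[1].
 */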

static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}

int
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int last_user = 0;
	NMA_LOCK(nmd);
	if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		last_user = 1;
		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		netmap_mem_init_bitmaps(nmd);
	}
	nmd->ops->nmd_deref(nmd, na);

	nmd->active--;
	if (last_user) {
		nmd->lasterr = 0;
	}

	NMA_UNLOCK(nmd);
	return last_user;
}


/* accessor functions */
static int
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
#ifdef __FreeBSD__
	lut->plut = lut->lut;
#endif
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;

	return 0;
}

static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};

static struct netmap_mem_d nm_mem_blueprint;

/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->nm_grp = grp_id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}

/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd, grp_id);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

static int
nm_mem_check_group(struct netmap_mem_d *nmd, bus_dma_tag_t dev)
{
	int err = 0, id;

	/* Skip non-hw adapters. VALE ports can be bound to a particular
	 * allocator through the vale-ctl -m option.
	 */
	if (!dev)
		return 0;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %d vs %d",
				nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry * lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0;	/* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gathers all the objects that make up the pools and maps
 * a contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *	of the memory needed for the pools
 * 2 - cycle all the objects in every pool and for every object do
 *
 *	2a - cycle all the objects in every pool, get the list
 *		of the physical address descriptors
 *	2b - calculate the offset in the array of pages descriptor in the
 *		main MDL
 *	2c - copy the descriptors of the object in the main MDL
 *
 * 3 - return the resulting MDL that needs to be mapped in userland
 *
 * In this way we will have a single MDL that describes all the memory
 * for the objects
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d* nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalised yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * Helper function for OS-specific mmap routines (currently only Windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if the memory is finalized and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd), otherwise the underlying info can change.
 */

int
netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
		u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
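
/*
 * Note on the layout: the three pools are mapped back to back in the
 * exported memory region, in pool order (if, ring, buf), so a global
 * offset is the memtotal of the preceding pools plus the object offset
 * within its own pool, as the macros above compute for the if and ring
 * pools.
 */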

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j = 0;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;
	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}

/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
		/* Skip this cluster if the given address is outside its
		 * range. */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default: 'null' index, i.e. empty list */
	for (i = 0; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
		nmd->pools[NETMAP_BUF_POOL].name,
		nmd->pools[NETMAP_BUF_POOL].objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator(). The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
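
/*
 * Worked example (assuming PAGE_SIZE is 4096): for 2048-byte objects
 * the search below stops at clustentries = 2, i.e. one 4096-byte
 * cluster with no wasted space; a request for 1000 such objects is
 * then satisfied by exactly 500 clusters.
 */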

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_BUF_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	if (p->lut) {
		/* if the lut is already there we assume that also all the
		 * clusters have already been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
		 */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->alloc_done = 1;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
	if (netmap_verbose)
		nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_mem_params_changed(struct netmap_obj_params* p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->objtotal;
	struct netmap_lut *lut;

	if (na == NULL || na->pdev == NULL)
		return 0;

	lut = &na->na_lut;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL || na->pdev == NULL)
		return 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
				p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
		struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;
	int checksz = 0;

	/* if memtotal != 0 we check that the request fits the available
	 * memory. Moreover, any surplus memory is assigned to buffers.
	 */
	checksz = (memtotal > 0);

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d, grp_id);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				d->name);
		if (checksz) {
			uint64_t poolsz = (uint64_t)p[i].num * p[i].size;
			if (memtotal < poolsz) {
				nm_prerr("%s: request too large", d->pools[i].name);
				err = ENOMEM;
				goto error_rel_id;
			}
			memtotal -= poolsz;
		}
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}
	if (checksz && memtotal > 0) {
		uint64_t sz = d->params[NETMAP_BUF_POOL].size;
		uint64_t n = (memtotal + sz - 1) / sz;

		if (n) {
			if (netmap_verbose) {
				nm_prinf("%s: adding %llu more buffers",
				    d->pools[NETMAP_BUF_POOL].name,
				    (unsigned long long)n);
			}
			d->params[NETMAP_BUF_POOL].num += n;
		}
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_destroy_lock;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_destroy_lock:
	NMA_LOCK_DESTROY(d);
error_rel_id:
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
		u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;
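	/* Worked example: with no pipes and no extra buffers, a port with
	 * one tx and one rx ring of 1024 slots each (plus the fake host
	 * rings accounted for above) needs 2*1024 + 2*1024 + 2 = 4098
	 * buffers, which is exactly the netmap_min_priv_params default.
	 */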

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr);

	return d;
}

/* Reference an iommu allocator: find an existing one or create a new one.
 * For non-hw adapters, fall back to the global allocator.
 */
struct netmap_mem_d *
netmap_mem_get_iommu(struct netmap_adapter *na)
{
	int i, err, grp_id;
	struct netmap_mem_d *nmd;

	if (na == NULL || na->pdev == NULL)
		return netmap_mem_get(&nm_mem);

	grp_id = nm_iommu_group_id(na->pdev);

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_grp == grp_id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);

	nmd = nm_os_malloc(sizeof(*nmd));
	if (nmd == NULL)
		goto error;

	*nmd = nm_mem_blueprint;

	err = nm_mem_assign_id_locked(nmd, grp_id);
	if (err)
		goto error_free;

	snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
			nm_mem_blueprint.pools[i].name, nmd->name);
	}

	NMA_LOCK_INIT(nmd);

	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;

error_free:
	nm_os_free(nmd);
error:
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

/* call with lock held */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	nm_prdis("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	return nmd->lasterr;
}

static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	nm_mem_blueprint = nm_mem;
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static int
netmap_mem_ring_needed(struct netmap_kring *kring)
{
	return kring->ring == NULL &&
		(kring->users > 0 ||
		 (kring->nr_kflags & NKR_NEEDRING));
}

static int
netmap_mem_ring_todelete(struct netmap_kring *kring)
{
	return kring->ring != NULL &&
		kring->users == 0 &&
		!(kring->nr_kflags & NKR_NEEDRING);
}


/* call with NMA_LOCK held *
 *
 * Allocate netmap rings and buffers for this card
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (!netmap_mem_ring_needed(kring)) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
1990 nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
1991 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(nmd, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			nm_prdis("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (nmd->pools[NETMAP_IF_POOL].memtotal +
				nmd->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(nmd, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(nmd);
			nm_prdis("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup
	 */

	return ENOMEM;
}
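
/*
 * Illustrative sketch (not compiled): how the buf_ofs value written
 * above is consumed.  buf_ofs is the offset from the start of a ring
 * to the start of the buffer pool, so a buffer is reachable with pure
 * pointer arithmetic relative to the ring itself; this mirrors the
 * NETMAP_BUF() macro in net/netmap_user.h.  The helper name is made up.
 */
#if 0	/* userspace sketch */
static inline char *
example_buf(struct netmap_ring *ring, uint32_t buf_idx)
{
	return (char *)ring + ring->buf_ofs +
	       (size_t)buf_idx * ring->nr_buf_size;
}
#endif	/* userspace sketch */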

static void
netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (!netmap_mem_ring_todelete(kring)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("deleting ring %s", kring->name);
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				nm_prdis("freeing bufs for %s", kring->name);
				netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
			} else {
				nm_prdis("NOT freeing bufs for %s", kring->name);
			}
			netmap_ring_free(nmd, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (possibly fake) host rings */
		n[t] = netmap_all_rings(na, t);
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(nmd, len);
	if (nifp == NULL) {
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
		(na->num_host_tx_rings ? na->num_host_tx_rings : 1);
	*(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
		(na->num_host_rx_rings ? na->num_host_rx_rings : 1);
	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nmd, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(nmd,
						 na->tx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	}
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
				&& i < priv->np_qlast[NR_RX]) {
			ofs = netmap_ring_offset(nmd,
						 na->rx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
	}

	return (nifp);
}
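
/*
 * Illustrative sketch (not compiled): how the ring_ofs[] array filled
 * in above is consumed.  Each entry is the offset of a ring relative
 * to the nifp, so the values stay valid wherever the process maps the
 * region.  This mirrors the NETMAP_TXRING()/NETMAP_RXRING() macros in
 * net/netmap_user.h; the helper names here are made up.
 */
#if 0	/* userspace sketch */
static inline struct netmap_ring *
example_txring(struct netmap_if *nifp, u_int i)
{
	return (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
}

static inline struct netmap_ring *
example_rxring(struct netmap_if *nifp, u_int i)
{
	/* rx entries follow all tx entries (hw + host), see the loops above */
	return (struct netmap_ring *)((char *)nifp +
	    nifp->ring_ofs[i + nifp->ni_tx_rings + nifp->ni_host_tx_rings]);
}
#endif	/* userspace sketch */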

static void
netmap_mem2_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(nmd, nifp);
}

static void
netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);

}

struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};
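
/*
 * Illustrative sketch (not compiled): callers do not invoke the
 * functions above directly; they go through the nmd->ops vtable, so
 * the same call sites serve the global, private, external and
 * ptnetmap-guest allocators alike.  A dispatch wrapper looks roughly
 * like this (the name below is made up; the real wrappers are defined
 * elsewhere in this file):
 */
#if 0	/* sketch */
static int
example_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	return nmd->ops->nmd_rings_create(nmd, na);
}
#endif	/* sketch */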

int
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
		struct netmap_mem_d *nmd)
{
	int ret;

	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
					&req->nr_mem_id);
	if (ret) {
		return ret;
	}

	NMA_LOCK(nmd);
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	return 0;
}
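
/*
 * Illustrative sketch (not compiled): the offsets and sizes reported
 * above describe the layout of the mapped region -- the if pool comes
 * first, then the ring pool, then the buffer pool.  Assuming 'mem' is
 * the base address returned by mmap(), an object can be located like
 * this (helper name made up):
 */
#if 0	/* userspace sketch */
static inline void *
example_pool_obj(void *mem, uint64_t pool_off, uint32_t objsize, uint32_t i)
{
	return (char *)mem + pool_off + (size_t)i * objsize;
}
/* e.g. buffer #10:
 * example_pool_obj(mem, req.nr_buf_pool_offset, req.nr_buf_pool_objsize, 10)
 */
#endif	/* userspace sketch */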

#ifdef WITH_EXTMEM
struct netmap_mem_ext {
	struct netmap_mem_d up;

	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
};

/* takes and releases nm_mem_ext_list_lock internally */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_ext_list_lock held */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}


static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	/* nothing to do: the layout is fixed at creation time */
	return 0;
}

struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	/* XXX sanity checks */
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		nm_prerr("os extmem creation failed");
		goto out;
	}

	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			-1,
			&netmap_mem_ext_ops,
			pi->nr_memsize,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on pages will be released by the nme destructor;
	 * we set os = NULL to prevent release in out_unmap below
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		p->bitmap_slots = (o->num + 31) / 32; /* one bit per object */
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			nm_prdis("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				nm_prdis("noff %zu page %p nr_pages %d", noff,
						clust, nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non-contiguous,
					 * drop this object
					 */
					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
					nm_prdis("non-contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * (size_t)p->_objsize;
		nm_prdis("%d memtotal %zu", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;
}
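
/*
 * Illustrative sketch (not compiled): netmap_mem_ext_create() is
 * typically reached from userspace by attaching an extmem option to
 * the register request, with nro_usrptr pointing at memory the process
 * has mapped itself.  Struct and constant names below follow
 * net/netmap.h; treat this as a sketch, not a validated example.
 */
#if 0	/* userspace sketch */
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <string.h>
#include <net/netmap.h>

static int
example_register_extmem(int fd, struct nmreq_header *hdr, size_t memsize)
{
	static struct nmreq_opt_extmem ext;
	void *mem = mmap(NULL, memsize, PROT_READ | PROT_WRITE,
	    MAP_ANONYMOUS | MAP_SHARED, -1, 0);

	memset(&ext, 0, sizeof(ext));
	ext.nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	ext.nro_usrptr = (uint64_t)(uintptr_t)mem;
	ext.nro_info.nr_memsize = memsize;
	/* leaving the per-pool params at 0 selects the defaults above */
	hdr->nr_options = (uint64_t)(uintptr_t)&ext;
	return ioctl(fd, NIOCCTRL, hdr);
}
#endif	/* userspace sketch */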
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP
struct mem_pt_if {
	struct mem_pt_if *next;
	struct ifnet *ifp;
	unsigned int nifp_offset;
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;

	ptif->next = ptnmd->pt_ifs;
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	nm_prinf("ifp=%s,nifp_offset=%u",
		ptif->ifp->if_xname, ptif->nifp_offset);

	return 0;
}

/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			return curr;
		}
	}

	return NULL;
}

/* Unlink a passthrough interface from a passthrough netmap allocator. */
int
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	int ret = -1;

	NMA_LOCK(nmd);

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			if (prev) {
				prev->next = curr->next;
			} else {
				ptnmd->pt_ifs = curr->next;
			}
			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
				curr->ifp->if_xname, curr->nifp_offset);
			nm_os_free(curr);
			ret = 0;
			break;
		}
		prev = curr;
	}

	NMA_UNLOCK(nmd);

	return ret;
}

static int
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		return EINVAL;
	}

	*lut = ptnmd->buf_lut;
	return 0;
}

static int
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
			     u_int *memflags, uint16_t *id)
{
	int error = 0;

	error = nmd->ops->nmd_config(nmd);
	if (error)
		goto out;

	if (size)
		*size = nmd->nm_totalsize;
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;

out:

	return error;
}

static vm_paddr_t
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	vm_paddr_t paddr;
	/* if the offset is valid, just return nm_paddr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx padr %lx", off, (unsigned long)paddr);
	return paddr;
}

static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	uint64_t mem_size;
	uint32_t bufsize;
	uint32_t nbuffers;
	uint32_t poolofs;
	char *vaddr;
	int i;
	int error = 0;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (ptnmd->ptn_dev == NULL) {
		nm_prerr("ptnetmap memdev not attached");
		error = ENOMEM;
		goto out;
	}
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto out;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		nm_prinf("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			nm_prerr("lut allocation failed");
			return ENOMEM;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;

	/* the buffers are consecutive in the mapping, so the lut only
	 * needs their virtual addresses; physical translation is done
	 * by netmap_mem_pt_guest_ofstophys() above
	 */
	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		vaddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = mem_size;

	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this: why do we need this
	 * replication? Maybe nmd->pools[] should not be
	 * there at all for the guest allocator. */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return error;
}
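
/*
 * Illustrative sketch (not compiled): the lut built above gives the
 * shared buffers consecutive guest virtual addresses inside the BAR
 * mapping, and netmap_mem_pt_guest_ofstophys() relies on the same
 * linearity for physical addresses.  Translating a buffer address to
 * a guest physical address is thus a single offset computation; the
 * helper name is made up.
 */
#if 0	/* sketch, assuming the layout built above */
static inline vm_paddr_t
example_buf_paddr(struct netmap_mem_ptg *ptnmd, uint32_t i)
{
	char *va = ptnmd->buf_lut.lut[i].vaddr;

	/* va - nm_addr is the offset inside the BAR mapping */
	return ptnmd->nm_paddr + (vm_paddr_t)(va - (char *)ptnmd->nm_addr);
}
#endif	/* sketch */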

static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (nmd->active == 1 &&
		(nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		nm_prinf("deleting %p", nmd);
	if (nmd->active > 0)
		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		nm_prinf("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
				    ptif->nifp_offset);
out:
	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
	}
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
		struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}


	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	}
	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp +
			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
	}

	error = 0;
out:
	return error;
}

static void
netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
#if 0
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];

			kring->ring = NULL;
		}
	}
#endif
	(void)nmd;
	(void)na;
}

static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
{
	struct netmap_mem_ptg *ptnmd;
	int err = 0;

	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
		err = ENOMEM;
		goto error;
	}

	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;

	/* Assign a new id in the guest (we hold nm_mem_list_lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up, -1);
	if (err)
		goto error;

	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;

	NMA_LOCK_INIT(&ptnmd->up);

	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);

	return &ptnmd->up;
error:
	/* the allocator is not fully initialized here (in particular, the
	 * lock is not), so free it directly rather than going through
	 * netmap_mem_pt_guest_delete()
	 */
	if (ptnmd != NULL)
		nm_os_free(ptnmd);
	return NULL;
}

/*
 * find host id in guest allocators and create guest allocator
 * if it is not there
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
	if (nmd == NULL) {
		nmd = netmap_mem_pt_guest_create(mem_id);
	}
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return nmd;
}

/*
 * The guest allocator can be created by ptnetmap_memdev (during the device
 * attach) or by the ptnetmap device (ptnet), during netmap_attach.
 *
 * The order is not important (it differs between Linux and FreeBSD).
 * Whichever runs first creates the allocator, and the other one simply
 * attaches to it.
 */

/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
 * the guest */
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;

	nmd = netmap_mem_pt_guest_get(mem_id);

	/* assign this device to the guest allocator */
	if (nmd) {
		ptnmd = (struct netmap_mem_ptg *)nmd;
		ptnmd->ptn_dev = ptn_dev;
	}

	return nmd;
}

/* Called when ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(struct ifnet *ifp,
			unsigned int nifp_offset,
			unsigned int memid)
{
	struct netmap_mem_d *nmd;

	if (ifp == NULL) {
		return NULL;
	}

	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);

	if (nmd) {
		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
	}

	return nmd;
}

#endif /* WITH_PTNETMAP */