/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/poll.h>		/* POLLIN, POLLOUT */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* DEV_MODULE_ORDERED */
#include <sys/endian.h>
#include <sys/syscallsubr.h>	/* kern_ioctl() */

#include <sys/rwlock.h>

#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>


#include <sys/malloc.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/kthread.h>	/* kthread_add() */
#include <sys/proc.h>		/* PROC_LOCK() */
#include <sys/unistd.h>		/* RFNOWAIT */
#include <sys/sched.h>		/* sched_bind() */
#include <sys/smp.h>		/* mp_maxid */
#include <sys/taskqueue.h>	/* taskqueue_enqueue(), taskqueue_create(), ... */
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>	/* IFT_ETHER */
#include <net/ethernet.h>	/* ether_ifdetach */
#include <net/if_dl.h>		/* LLADDR */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

static void
nm_kqueue_notify(void *opaque, int pending)
{
	struct nm_selinfo *si = opaque;

	/* We use a non-zero hint to distinguish this notification call
	 * from the call done in kqueue_scan(), which uses hint=0.
	 */
	KNOTE_UNLOCKED(&si->si.si_note, /*hint=*/0x100);
}

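/* Initialize the nm_selinfo: create the taskqueue (with its thread)
 * used to defer kqueue notifications, and the mutex that protects the
 * knote list.
 */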
int
nm_os_selinfo_init(NM_SELINFO_T *si, const char *name)
{
	int err;

	TASK_INIT(&si->ntfytask, 0, nm_kqueue_notify, si);
	si->ntfytq = taskqueue_create(name, M_NOWAIT,
	    taskqueue_thread_enqueue, &si->ntfytq);
	if (si->ntfytq == NULL)
		return -ENOMEM;
	err = taskqueue_start_threads(&si->ntfytq, 1, PI_NET, "tq %s", name);
	if (err) {
		taskqueue_free(si->ntfytq);
		si->ntfytq = NULL;
		return err;
	}

	snprintf(si->mtxname, sizeof(si->mtxname), "nmkl%s", name);
	mtx_init(&si->m, si->mtxname, NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, &si->m);
	si->kqueue_users = 0;

	return (0);
}

void
nm_os_selinfo_uninit(NM_SELINFO_T *si)
{
	if (si->ntfytq == NULL) {
		return;	/* si was not initialized */
	}
	taskqueue_drain(si->ntfytq, &si->ntfytask);
	taskqueue_free(si->ntfytq);
	si->ntfytq = NULL;
	knlist_delete(&si->si.si_note, curthread, /*islocked=*/0);
	knlist_destroy(&si->si.si_note);
	/* now we don't need the mutex anymore */
	mtx_destroy(&si->m);
}

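/* Kernel heap wrappers: all netmap allocations use the M_DEVBUF malloc
 * type, do not sleep (M_NOWAIT) and return zeroed memory (M_ZERO).
 */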
void *
nm_os_malloc(size_t size)
{
	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void *
nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused)
{
	return realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void
nm_os_free(void *addr)
{
	free(addr, M_DEVBUF);
}

void
nm_os_ifnet_lock(void)
{
	IFNET_RLOCK();
}

void
nm_os_ifnet_unlock(void)
{
	IFNET_RUNLOCK();
}

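/* Count of active netmap users. Checked on MOD_UNLOAD (see
 * netmap_loader() below) to refuse unloading the module while it is
 * still in use.
 */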
static int netmap_use_count = 0;

void
nm_os_get_module(void)
{
	netmap_use_count++;
}

void
nm_os_put_module(void)
{
	netmap_use_count--;
}

static void
netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_undo_zombie(ifp);
}

static void
netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_make_zombie(ifp);
}

static eventhandler_tag nm_ifnet_ah_tag;
static eventhandler_tag nm_ifnet_dh_tag;

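/* Register the ifnet arrival/departure handlers: a netmap adapter is
 * turned into a zombie when its interface goes away, and resurrected
 * if the interface comes back.
 */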
int
nm_os_ifnet_init(void)
{
	nm_ifnet_ah_tag =
		EVENTHANDLER_REGISTER(ifnet_arrival_event,
			netmap_ifnet_arrival_handler,
			NULL, EVENTHANDLER_PRI_ANY);
	nm_ifnet_dh_tag =
		EVENTHANDLER_REGISTER(ifnet_departure_event,
			netmap_ifnet_departure_handler,
			NULL, EVENTHANDLER_PRI_ANY);
	return 0;
}

void
nm_os_ifnet_fini(void)
{
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
		nm_ifnet_ah_tag);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		nm_ifnet_dh_tag);
}

unsigned
nm_os_ifnet_mtu(struct ifnet *ifp)
{
	return ifp->if_mtu;
}

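/* Accumulate a 16-bit one's complement sum (unfolded, in host byte
 * order) over 'len' bytes starting at 'data'; an odd trailing byte is
 * zero-padded on the right, as the Internet checksum requires.
 */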
rawsum_t
nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
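/* For example, folding cur_sum = 0x12345 first reduces it to
 * (0x2345 + 0x1) = 0x2346, and then returns
 * htobe16(~0x2346 & 0xFFFF) = htobe16(0xdcb9).
 */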
uint16_t
nm_os_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}

uint16_t
nm_os_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph,
				sizeof(struct nm_iphdr), 0));
#endif
}

void
nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		       size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
			   htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet4 segmentation not supported");
	}
#endif
}

void
nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		       size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet6 segmentation not supported");
	}
#endif
}

/* on FreeBSD we send up one packet at a time */
void *
nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev)
{
	NA(ifp)->if_input(ifp, m);
	return NULL;
}

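/* Return non-zero if the mbuf requests TCP/UDP/SCTP checksum offload,
 * for either IPv4 or IPv6.
 */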
int
nm_os_mbuf_has_csum_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP |
					 CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |
					 CSUM_SCTP_IPV6);
}

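/* Return non-zero if the mbuf requests TCP segmentation offload. */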
int
nm_os_mbuf_has_seg_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & CSUM_TSO;
}

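/* if_input replacement installed by nm_os_catch_rx(): intercepted
 * packets go to the emulated netmap adapter; packets that netmap does
 * not steal are handed back to the saved if_input routine.
 */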
static void
freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	int stolen;

	if (unlikely(!NM_NA_VALID(ifp))) {
		nm_prlim(1, "Warning: RX packet intercepted, but no"
			" emulated adapter");
		return;
	}

	stolen = generic_rx_handler(ifp, m);
	if (!stolen) {
		struct netmap_generic_adapter *gna =
			(struct netmap_generic_adapter *)NA(ifp);
		gna->save_if_input(ifp, m);
	}
}

/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore
 */
int
nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;
	int ret = 0;

	nm_os_ifnet_lock();
	if (intercept) {
		if (gna->save_if_input) {
			nm_prerr("RX on %s already intercepted", na->name);
			ret = EBUSY; /* already set */
			goto out;
		}

		ifp->if_capenable |= IFCAP_NETMAP;
		gna->save_if_input = ifp->if_input;
		ifp->if_input = freebsd_generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			nm_prerr("Failed to undo RX intercept on %s",
				na->name);
			ret = EINVAL; /* not saved */
			goto out;
		}

		ifp->if_capenable &= ~IFCAP_NETMAP;
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}
out:
	nm_os_ifnet_unlock();

	return ret;
}


/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
int
nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);

	nm_os_ifnet_lock();
	if (intercept) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
	nm_os_ifnet_unlock();

	return 0;
}


/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	int ret;
	u_int len = a->len;
	struct ifnet *ifp = a->ifp;
	struct mbuf *m = a->m;

	/* Link the external storage to
	 * the netmap buffer, so that no copy is necessary. */
	m->m_ext.ext_buf = m->m_data = a->addr;
	m->m_ext.ext_size = len;

	m->m_flags |= M_PKTHDR;
	m->m_len = m->m_pkthdr.len = len;

	/* mbuf refcnt is not contended, no need to use atomic
	 * (a memory barrier is enough). */
	SET_MBUF_REFCNT(m, 2);
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	m->m_pkthdr.flowid = a->ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	CURVNET_SET(ifp->if_vnet);
	ret = NA(ifp)->if_transmit(ifp, m);
	CURVNET_RESTORE();
	return ret ? -1 : 0;
}


struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA((struct ifnet *)ifp));
}

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp
 */
int
nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	return 0;
}


void
nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1;

	*txq = num_rings;
	*rxq = num_rings;
}

void
nm_os_generic_set_features(struct netmap_generic_adapter *gna)
{

	gna->rxsg = 1; /* Supported through m_copydata. */
	gna->txqdisc = 0; /* Not supported. */
}

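/* Interrupt mitigation for emulated adapters is not implemented on
 * FreeBSD: init only records the ring index and the adapter, and the
 * remaining hooks are no-ops.
 */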
void
nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
		      struct netmap_adapter *na)
{
	mit->mit_pending = 0;
	mit->mit_ring_idx = idx;
	mit->mit_na = na;
}


void
nm_os_mitigation_start(struct nm_generic_mit *mit)
{
}


void
nm_os_mitigation_restart(struct nm_generic_mit *mit)
{
}


int
nm_os_mitigation_active(struct nm_generic_mit *mit)
{

	return 0;
}


void
nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
{
}

static int
nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
{

	return EINVAL;
}

static void
nm_vi_start(struct ifnet *ifp)
{
	panic("nm_vi_start() must not be called");
}

/*
 * Index manager of persistent virtual interfaces.
 * It is used to decide the lowest byte of the MAC address.
 * We use the same algorithm used to manage bridge port indices.
 */
#define NM_VI_MAX 255
static struct {
	uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */
	uint8_t active;
	struct mtx lock;
} nm_vi_indices;

void
nm_os_vi_init_index(void)
{
	int i;
	for (i = 0; i < NM_VI_MAX; i++)
		nm_vi_indices.index[i] = i;
	nm_vi_indices.active = 0;
	mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
}

/* return -1 if no index available */
static int
nm_vi_get_index(void)
{
	int ret;

	mtx_lock(&nm_vi_indices.lock);
	ret = nm_vi_indices.active == NM_VI_MAX ? -1 :
		nm_vi_indices.index[nm_vi_indices.active++];
	mtx_unlock(&nm_vi_indices.lock);
	return ret;
}

static void
nm_vi_free_index(uint8_t val)
{
	int i, lim;

	mtx_lock(&nm_vi_indices.lock);
	lim = nm_vi_indices.active;
	for (i = 0; i < lim; i++) {
		if (nm_vi_indices.index[i] == val) {
			/* swap index[lim-1] and index[i] */
			int tmp = nm_vi_indices.index[lim-1];
			nm_vi_indices.index[lim-1] = val;
			nm_vi_indices.index[i] = tmp;
			nm_vi_indices.active--;
			break;
		}
	}
	if (lim == nm_vi_indices.active)
		nm_prerr("Index %u not found", val);
	mtx_unlock(&nm_vi_indices.lock);
}
#undef NM_VI_MAX

/*
 * Implementation of a netmap-capable virtual interface that is
 * registered to the system.
 * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
 *
 * Note: Linux sets refcount to 0 on allocation of net_device,
 * then increments it on registration to the system.
 * FreeBSD sets refcount to 1 on if_alloc(), and does not
 * increment this refcount on if_attach().
 */
int
nm_os_vi_persist(const char *name, struct ifnet **ret)
{
	struct ifnet *ifp;
	u_short macaddr_hi;
	uint32_t macaddr_mid;
	u_char eaddr[6];
	int unit = nm_vi_get_index(); /* just to decide MAC address */

	if (unit < 0)
		return EBUSY;
	/*
	 * We use the same MAC address generation method as tap,
	 * except that the two highest octets are 00:be instead of 00:bd.
	 */
	macaddr_hi = htons(0x00be); /* XXX tap + 1 */
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (uint8_t)unit;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		nm_prerr("if_alloc failed");
		return ENOMEM;
	}
	if_initname(ifp, name, IF_DUNIT_NONE);
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = (void *)nm_vi_dummy;
	ifp->if_ioctl = nm_vi_dummy;
	ifp->if_start = nm_vi_start;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	ether_ifattach(ifp, eaddr);
	*ret = ifp;
	return 0;
}

/* unregister from the system and drop the final refcount */
void
nm_os_vi_detach(struct ifnet *ifp)
{
	nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]);
	ether_ifdetach(ifp);
	if_free(ifp);
}

#ifdef WITH_EXTMEM
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
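
/* Descriptor of a user-provided external memory region that has been
 * wired and mapped into the kernel address space.
 */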
struct nm_os_extmem {
	vm_object_t obj;
	vm_offset_t kva;
	vm_offset_t size;
	uintptr_t scan;
};

void
nm_os_extmem_delete(struct nm_os_extmem *e)
{
	nm_prinf("freeing %zx bytes", (size_t)e->size);
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
	nm_os_free(e);
}

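/* Return the next page of the mapped region, advancing the scan by
 * PAGE_SIZE on each call, or NULL once the whole region has been
 * scanned.
 */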
char *
nm_os_extmem_nextpage(struct nm_os_extmem *e)
{
	char *rv = NULL;
	if (e->scan < e->kva + e->size) {
		rv = (char *)e->scan;
		e->scan += PAGE_SIZE;
	}
	return rv;
}

int
nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2)
{
	return (e1->obj == e2->obj);
}

int
nm_os_extmem_nr_pages(struct nm_os_extmem *e)
{
	return e->size >> PAGE_SHIFT;
}

struct nm_os_extmem *
nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj;
	vm_prot_t prot;
	vm_pindex_t index;
	boolean_t wired;
	struct nm_os_extmem *e = NULL;
	int rv, error = 0;

	e = nm_os_malloc(sizeof(*e));
	if (e == NULL) {
		error = ENOMEM;
		goto out;
	}

	map = &curthread->td_proc->p_vmspace->vm_map;
	rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
			&obj, &index, &prot, &wired);
	if (rv != KERN_SUCCESS) {
		nm_prerr("address %lx not found", p);
		error = vm_mmap_to_errno(rv);
		goto out_free;
	}
	vm_object_reference(obj);

	/* check that we are given the whole vm_object ? */
	vm_map_lookup_done(map, entry);

	e->obj = obj;
	/* Wire the memory and add the vm_object to the kernel map,
	 * to make sure that it is not freed even if all the processes
	 * that mmap()ed it call munmap().
	 */
	e->kva = vm_map_min(kernel_map);
	e->size = obj->size << PAGE_SHIFT;
	rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0,
			VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
			VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
		error = vm_mmap_to_errno(rv);
		goto out_rel;
	}
	rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
		VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_wire failed");
		error = vm_mmap_to_errno(rv);
		goto out_rem;
	}

	e->scan = e->kva;

	return e;

out_rem:
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
out_rel:
	vm_object_deallocate(e->obj);
	e->obj = NULL;
out_free:
	nm_os_free(e);
out:
	if (perror)
		*perror = error;
	return NULL;
}
#endif /* WITH_EXTMEM */

/* ================== PTNETMAP GUEST SUPPORT ==================== */

#ifdef WITH_PTNETMAP
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/*
 * ptnetmap memory device (memdev) for the FreeBSD guest,
 * used to expose host netmap memory to the guest through a PCI BAR.
 */

/*
 * ptnetmap memdev private data structure
 */
struct ptnetmap_memdev {
	device_t dev;
	struct resource *pci_io;
	struct resource *pci_mem;
	struct netmap_mem_d *nm_mem;
};

static int	ptn_memdev_probe(device_t);
static int	ptn_memdev_attach(device_t);
static int	ptn_memdev_detach(device_t);
static int	ptn_memdev_shutdown(device_t);

static device_method_t ptn_memdev_methods[] = {
	DEVMETHOD(device_probe, ptn_memdev_probe),
	DEVMETHOD(device_attach, ptn_memdev_attach),
	DEVMETHOD(device_detach, ptn_memdev_detach),
	DEVMETHOD(device_shutdown, ptn_memdev_shutdown),
	DEVMETHOD_END
};

static driver_t ptn_memdev_driver = {
	PTNETMAP_MEMDEV_NAME,
	ptn_memdev_methods,
	sizeof(struct ptnetmap_memdev),
};

/* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation
 * below. */
DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, NULL, NULL,
		      SI_ORDER_MIDDLE + 1);

/*
 * Map host netmap memory through PCI-BAR in the guest OS,
 * returning physical (nm_paddr) and virtual (nm_addr) addresses
 * of the netmap memory mapped in the guest.
 */
int
nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr,
		      void **nm_addr, uint64_t *mem_size)
{
	int rid;

	nm_prinf("ptn_memdev_driver iomap");

	rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) |
			(*mem_size << 32);

	/* map memory allocator */
	ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY,
			&rid, 0, ~0, *mem_size, RF_ACTIVE);
	if (ptn_dev->pci_mem == NULL) {
		*nm_paddr = 0;
		*nm_addr = NULL;
		return ENOMEM;
	}

	*nm_paddr = rman_get_start(ptn_dev->pci_mem);
	*nm_addr = rman_get_virtual(ptn_dev->pci_mem);

	nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
			PTNETMAP_MEM_PCI_BAR,
			(unsigned long)(*nm_paddr),
			(unsigned long)rman_get_size(ptn_dev->pci_mem),
			(unsigned long)*mem_size);
	return (0);
}

uint32_t
nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg)
{
	return bus_read_4(ptn_dev->pci_io, reg);
}

/* Unmap host netmap memory. */
void
nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
{
	nm_prinf("ptn_memdev_driver iounmap");

	if (ptn_dev->pci_mem) {
		bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
}

/* Device identification routine, return BUS_PROBE_DEFAULT on success,
 * positive on failure */
static int
ptn_memdev_probe(device_t dev)
{
	char desc[256];

	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID)
		return (ENXIO);
	if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID)
		return (ENXIO);

	snprintf(desc, sizeof(desc), "%s PCI adapter",
			PTNETMAP_MEMDEV_NAME);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

/* Device initialization routine. */
static int
ptn_memdev_attach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;
	int rid;
	uint16_t mem_id;

	ptn_dev = device_get_softc(dev);
	ptn_dev->dev = dev;

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
						 RF_ACTIVE);
	if (ptn_dev->pci_io == NULL) {
		device_printf(dev, "cannot map I/O space\n");
		return (ENXIO);
	}

	mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID);

	/* create guest allocator */
	ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id);
	if (ptn_dev->nm_mem == NULL) {
		ptn_memdev_detach(dev);
		return (ENOMEM);
	}
	netmap_mem_get(ptn_dev->nm_mem);

	nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);

	return (0);
}

/* Device removal routine. */
static int
ptn_memdev_detach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;

	ptn_dev = device_get_softc(dev);

	if (ptn_dev->nm_mem) {
		nm_prinf("ptnetmap memdev detached, host memid %u",
			netmap_mem_get_id(ptn_dev->nm_mem));
		netmap_mem_put(ptn_dev->nm_mem);
		ptn_dev->nm_mem = NULL;
	}
	if (ptn_dev->pci_mem) {
		bus_release_resource(dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
	if (ptn_dev->pci_io) {
		bus_release_resource(dev, SYS_RES_IOPORT,
			PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io);
		ptn_dev->pci_io = NULL;
	}

	return (0);
}

static int
ptn_memdev_shutdown(device_t dev)
{
	return bus_generic_shutdown(dev);
}

#endif /* WITH_PTNETMAP */

/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};


static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		nm_prinf("handle %p size %jd prot %d foff %jd",
			handle, (intmax_t)size, prot, (intmax_t)foff);
	if (color)
		*color = 0;
	dev_ref(vmh->dev);
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		nm_prinf("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}


static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	struct netmap_adapter *na = priv->np_na;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;

	nm_prdis("object %p offset %jd prot %d mres %p",
			object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	paddr = netmap_mem_ofstophys(na->nm_mem, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_replace(page, object, (*mres)->pindex, *mres);
		*mres = page;
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
	if (priv->np_nifp == NULL) {
		error = EINVAL;
		goto err_unlock;
	}
	vmh->priv = priv;
	priv->np_refs++;
	NMG_UNLOCK();

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		nm_prerr("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refs--;
err_unlock:
	NMG_UNLOCK();
// err:
	free(vmh, M_DEVBUF);
	return error;
}


/*
 * On FreeBSD the close routine is only called on the last close on
 * the device (/dev/netmap) so we cannot do anything useful.
 * To track close() on individual file descriptors we pass netmap_dtor() to
 * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
 * when the last fd pointing to the device is closed.
 *
 * Note that FreeBSD does not even munmap() on close() so we also have
 * to track mmap() ourselves, and postpone the call to netmap_dtor()
 * until the process has no open fds and no active memory maps on
 * /dev/netmap, as in Linux.
 */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		nm_prinf("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}


static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	NMG_LOCK();
	priv = netmap_priv_new();
	if (priv == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error) {
		netmap_priv_delete(priv);
	}
out:
	NMG_UNLOCK();
	return error;
}

/******************** kthread wrapper ****************/
#include <sys/sysproto.h>
u_int
nm_os_ncpus(void)
{
	return mp_maxid + 1;
}

struct nm_kctx_ctx {
	/* Userspace thread (kthread creator). */
	struct thread *user_td;

	/* worker function and parameter */
	nm_kctx_worker_fn_t worker_fn;
	void *worker_private;

	struct nm_kctx *nmk;

	/* integer to manage multiple worker contexts
	 * (e.g., RX or TX on ptnetmap) */
	long type;
};

struct nm_kctx {
	struct thread		*worker;
	struct mtx		worker_lock;
	struct nm_kctx_ctx	worker_ctx;
	int			run;		/* used to stop kthread */
	int			attach_user;	/* kthread attached to user_process */
	int			affinity;
};

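/* Body of the kernel context worker thread: optionally bind to a CPU,
 * then keep calling the worker function until nmk->run is cleared,
 * honoring suspend requests from the attached user process (if any)
 * or from the kernel between iterations.
 */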
static void
nm_kctx_worker(void *data)
{
	struct nm_kctx *nmk = data;
	struct nm_kctx_ctx *ctx = &nmk->worker_ctx;

	if (nmk->affinity >= 0) {
		thread_lock(curthread);
		sched_bind(curthread, nmk->affinity);
		thread_unlock(curthread);
	}

	while (nmk->run) {
		/*
		 * check if the parent process dies
		 * (when kthread is attached to user process)
		 */
		if (ctx->user_td) {
			PROC_LOCK(curproc);
			thread_suspend_check(0);
			PROC_UNLOCK(curproc);
		} else {
			kthread_suspend_check();
		}

		/* Continuously execute worker process. */
		ctx->worker_fn(ctx->worker_private); /* worker body */
	}

	kthread_exit();
}

void
nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity)
{
	nmk->affinity = affinity;
}

struct nm_kctx *
nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque)
{
	struct nm_kctx *nmk = NULL;

	nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!nmk)
		return NULL;

	mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF);
	nmk->worker_ctx.worker_fn = cfg->worker_fn;
	nmk->worker_ctx.worker_private = cfg->worker_private;
	nmk->worker_ctx.type = cfg->type;
	nmk->affinity = -1;

	/* attach kthread to user process (ptnetmap) */
	nmk->attach_user = cfg->attach_user;

	return nmk;
}

int
nm_os_kctx_worker_start(struct nm_kctx *nmk)
{
	struct proc *p = NULL;
	int error = 0;

	/* Temporarily disable this function as it is currently broken
	 * and causes kernel crashes. The failure can be triggered by
	 * the "vale_polling_enable_disable" test in ctrl-api-test.c. */
	return EOPNOTSUPP;

	if (nmk->worker)
		return EBUSY;

	/* check if we want to attach kthread to user process */
	if (nmk->attach_user) {
		nmk->worker_ctx.user_td = curthread;
		p = curthread->td_proc;
	}

	/* enable kthread main loop */
	nmk->run = 1;
	/* create kthread */
	if ((error = kthread_add(nm_kctx_worker, nmk, p,
			&nmk->worker, RFNOWAIT /* to be checked */, 0,
			"nm-kthread-%ld", nmk->worker_ctx.type))) {
		goto err;
	}

	nm_prinf("nm_kthread started td %p", nmk->worker);

	return 0;
err:
	nm_prerr("nm_kthread start failed err %d", error);
	nmk->worker = NULL;
	return error;
}

void
nm_os_kctx_worker_stop(struct nm_kctx *nmk)
{
	if (!nmk->worker)
		return;

	/* tell the kthread to exit its main loop */
	nmk->run = 0;

	/* wake up the kthread if it sleeps */
	kthread_resume(nmk->worker);

	nmk->worker = NULL;
}

void
nm_os_kctx_destroy(struct nm_kctx *nmk)
{
	if (!nmk)
		return;

	if (nmk->worker)
		nm_os_kctx_worker_stop(nmk);

	free(nmk, M_DEVBUF);
}

/******************** kqueue support ****************/

/*
 * In addition to calling selwakeuppri(), nm_os_selwakeup() also
 * needs to call knote() to wake up kqueue listeners.
 * This operation is deferred to a taskqueue in order to avoid possible
 * lock order reversals; these may happen because knote() grabs a
 * private lock associated with the 'si' (see struct selinfo,
 * struct nm_selinfo, and nm_os_selinfo_init), and nm_os_selwakeup()
 * can be called while holding the lock associated with a different
 * 'si'.
 * When calling knote() we use a non-zero 'hint' argument to inform
 * the netmap_knrw() function that it is being called from
 * 'nm_os_selwakeup'; this is necessary because when netmap_knrw() is
 * called by the kevent subsystem (i.e. kevent_scan()) we also need to
 * call netmap_poll().
 *
 * The netmap_kqfilter() function registers one or another f_event
 * depending on read or write mode. A pointer to the struct
 * 'netmap_priv_d' is stored into kn->kn_hook, so that it can later
 * be passed to netmap_poll(). We pass NULL as a third argument to
 * netmap_poll(), so that the latter only runs the txsync/rxsync
 * (if necessary), and skips the nm_os_selrecord() calls.
 */


void
nm_os_selwakeup(struct nm_selinfo *si)
{
	selwakeuppri(&si->si, PI_NET);
	if (si->kqueue_users > 0) {
		taskqueue_enqueue(si->ntfytq, &si->ntfytask);
	}
}

void
nm_os_selrecord(struct thread *td, struct nm_selinfo *si)
{
	selrecord(td, &si->si);
}

static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_RX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
	    si->mtxname));
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_TX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}

/*
 * Callback triggered by netmap notifications (see netmap_notify()),
 * and by the application calling kevent(). In the former case we
 * just return 1 (events ready), since we are not able to do better.
 * In the latter case we use netmap_poll() to see which events are
 * ready.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		/* Called from netmap_notify(), typically from a
		 * thread different from the one issuing kevent().
		 * Assume we are ready. */
		return 1;
	}

	/* Called from kevent(). */
	priv = kn->kn_hook;
	revents = netmap_poll(priv, events, /*thread=*/NULL);

	return (events & revents) ? 1 : 0;
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}

static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};


/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue.
 * The 'priv' is the one associated with the open netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct nm_selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		nm_prerr("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void**)&priv);
	if (error) {
		nm_prerr("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		nm_prerr("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX];
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	NMG_LOCK();
	si->kqueue_users++;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
	knlist_add(&si->si.si_note, kn, /*islocked=*/0);

	return 0;
}

static int
freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td)
{
	struct netmap_priv_d *priv;

	if (devfs_get_cdevpriv((void **)&priv)) {
		return POLLERR;
	}
	return netmap_poll(priv, events, td);
}

static int
freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
		int fflag __unused, struct thread *td)
{
	int error;
	struct netmap_priv_d *priv;

	CURVNET_SET(TD_TO_VNET(td));
	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		if (error == ENOENT)
			error = ENXIO;
		goto out;
	}
	error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1);
out:
	CURVNET_RESTORE();

	return error;
}

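/* Called when a netmap adapter is attached to the interface:
 * advertise the netmap capability.
 */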
void
nm_os_onattach(struct ifnet *ifp)
{
	ifp->if_capabilities |= IFCAP_NETMAP;
}

void
nm_os_onenter(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
	ifp->if_capenable |= IFCAP_NETMAP;
}

void
nm_os_onexit(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	ifp->if_transmit = na->if_transmit;
	ifp->if_capenable &= ~IFCAP_NETMAP;
}

extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */
struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = freebsd_netmap_ioctl,
	.d_poll = freebsd_netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/

/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		/*
		 * If someone is still using netmap,
		 * then the module cannot be unloaded.
		 */
		if (netmap_use_count) {
			nm_prerr("netmap module can not be unloaded - netmap_use_count: %d",
					netmap_use_count);
			error = EBUSY;
			break;
		}
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

#ifdef DEV_MODULE_ORDERED
/*
 * The netmap module contains three drivers: (i) the netmap character device
 * driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI
 * device driver. The attach() routines of both (ii) and (iii) need the
 * lock of the global allocator, and such lock is initialized in netmap_init(),
 * which is part of (i).
 * Therefore, we make sure that (i) is loaded before (ii) and (iii), using
 * the 'order' parameter of driver declaration macros. For (i), we specify
 * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED
 * macros for (ii) and (iii).
 */
DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE);
#else /* !DEV_MODULE_ORDERED */
DEV_MODULE(netmap, netmap_loader, NULL);
#endif /* DEV_MODULE_ORDERED */
MODULE_DEPEND(netmap, pci, 1, 1, 1);
MODULE_VERSION(netmap, 1);