/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h> /* vtophys ? */
#include <dev/netmap/netmap_kern.h>
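
/*
 * Note: this file is not a standalone compilation unit; it is meant to
 * be included by the vtnet driver (if_vtnet.c) when netmap support is
 * compiled in.
 */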

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
        struct ifnet *ifp = na->ifp;
        struct vtnet_softc *sc = ifp->if_softc;

        /*
         * Trigger a device reinit, asking vtnet_init_locked() to
         * also enter or exit netmap mode.
         */
        VTNET_CORE_LOCK(sc);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        vtnet_init_locked(sc, state ? VTNET_INIT_NETMAP_ENTER
            : VTNET_INIT_NETMAP_EXIT);
        VTNET_CORE_UNLOCK(sc);

        return (0);
}

/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int nm_i; /* index into the netmap ring */
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
        struct virtqueue *vq = txq->vtntx_vq;
        int interrupts = !(kring->nr_kflags & NKR_NOINTR);
        u_int n;

        /*
         * First part: process new packets to send.
         */

        nm_i = kring->nr_hwcur;
        if (nm_i != head) { /* we have new packets to send */
                struct sglist *sg = txq->vtntx_sg;

                for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
                        /* we use an empty header here */
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        uint64_t offset = nm_get_offset(kring, slot);
                        u_int len = slot->len;
                        uint64_t paddr;
                        int err;

                        (void)PNMB(na, slot, &paddr);
                        NM_CHECK_ADDR_LEN_OFF(na, len, offset);

                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        /*
                         * Initialize the scatterlist and expose it to
                         * the hypervisor; the host is notified once,
                         * after the loop, rather than on every packet.
                         */
                        sglist_reset(sg); /* cheap */
                        err = sglist_append(sg, &txq->vtntx_shrhdr,
                            sc->vtnet_hdr_size);
                        err |= sglist_append_phys(sg, paddr + offset, len);
                        KASSERT(err == 0, ("%s: cannot append to sglist %d",
                            __func__, err));
                        err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
                            /*readable=*/sg->sg_nseg,
                            /*writeable=*/0);
                        if (unlikely(err)) {
                                if (err != ENOSPC)
                                        nm_prerr("virtqueue_enqueue(%s) failed: %d",
                                            kring->name, err);
                                break;
                        }
                }

                virtqueue_notify(vq);

                /* Update hwcur depending on where we stopped. */
                kring->nr_hwcur = nm_i; /* note we might break early */
        }
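
        /*
         * If the loop above stopped early (ENOSPC), nr_hwcur is left
         * short of rhead; the remaining slots stay pending and are
         * retried on the next txsync call, once used buffers have been
         * reclaimed below.
         */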

        /* Free used slots. We only consider our own used buffers, recognized
         * by the token we passed to virtqueue_enqueue.
         */
        n = 0;
        for (;;) {
                void *token = virtqueue_dequeue(vq, NULL);
                if (token == NULL)
                        break;
                if (unlikely(token != (void *)txq))
                        nm_prerr("BUG: TX token mismatch");
                else
                        n++;
        }
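
        /*
         * Advance nr_hwtail by the number of reclaimed buffers, wrapping
         * around modulo the ring size (lim + 1 slots).
         */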
        if (n > 0) {
                kring->nr_hwtail += n;
                if (kring->nr_hwtail > lim)
                        kring->nr_hwtail -= lim + 1;
        }
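
        /*
         * When few free slots remain, ask the host for a (long postponed)
         * TX completion interrupt, so that completed buffers are
         * eventually reclaimed even if the application stops calling
         * txsync.
         */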
        if (interrupts && virtqueue_nfree(vq) < 32)
                virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

        return 0;
}

/*
 * Publish 'num' netmap receive buffers to the host, starting
 * from the next available one (rxq->vtnrx_nm_refill).
 * Return a positive error code on error, and 0 on success.
 * If we could not publish all of the buffers, that is an error,
 * since the netmap ring and the virtqueue would go out of sync.
 */
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int nm_i;

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
        struct virtqueue *vq = rxq->vtnrx_vq;

        /* use a local sglist, default might be short */
        struct sglist_seg ss[2];
        struct sglist sg = { ss, 0, 0, 2 };
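        /*
         * The trailing '2' above is sg_maxseg: one segment for the
         * shared virtio-net header plus one for the netmap buffer.
         */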

        for (nm_i = rxq->vtnrx_nm_refill; num > 0;
            nm_i = nm_next(nm_i, lim), num--) {
                struct netmap_slot *slot = &ring->slot[nm_i];
                uint64_t offset = nm_get_offset(kring, slot);
                uint64_t paddr;
                void *addr = PNMB(na, slot, &paddr);
                int err;
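
                /*
                 * PNMB() returns the base netmap buffer when the slot
                 * holds an invalid buffer index, so this check catches a
                 * corrupted ring before any memory is exposed to the host.
                 */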
                if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
                        netmap_ring_reinit(kring);
                        return EFAULT;
                }

                slot->flags &= ~NS_BUF_CHANGED;
                sglist_reset(&sg);
                err = sglist_append(&sg, &rxq->vtnrx_shrhdr,
                    sc->vtnet_hdr_size);
                err |= sglist_append_phys(&sg, paddr + offset,
                    NETMAP_BUF_SIZE(na) - offset);
                KASSERT(err == 0, ("%s: cannot append to sglist %d",
                    __func__, err));
                /* writable for the host */
                err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
                    /*readable=*/0, /*writeable=*/sg.sg_nseg);
                if (unlikely(err)) {
                        nm_prerr("virtqueue_enqueue(%s) failed: %d",
                            kring->name, err);
                        break;
                }
        }
        rxq->vtnrx_nm_refill = nm_i;

        return num == 0 ? 0 : ENOSPC;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
        struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
        struct netmap_kring *kring;
        struct netmap_slot *slot;
        int error;
        int num;

        slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0);
        if (slot == NULL)
                return -1;
        kring = na->rx_rings[rxq->vtnrx_id];

        /*
         * Expose all the RX netmap buffers we can. In case of no indirect
         * buffers, the number of netmap slots in the RX ring matches the
         * maximum number of 2-element sglists that the RX virtqueue can
         * accommodate. We need to start from kring->nr_hwtail, which is 0
         * on the first netmap register and may be different from 0 if a
         * virtio re-init (caused by a netmap register or, e.g., an
         * ifconfig) happens while the device is in use by netmap.
         */
        rxq->vtnrx_nm_refill = kring->nr_hwtail;
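        /*
         * Publish at most num_rx_desc - 1 buffers, so that nr_hwtail can
         * never overrun nr_hwcur (see vtnet_netmap_rxsync()).
         */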
        num = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
        error = vtnet_netmap_kring_refill(kring, num);
        virtqueue_notify(rxq->vtnrx_vq);

        return error;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int nm_i; /* index into the netmap ring */
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        int force_update = (flags & NAF_FORCE_READ) ||
            (kring->nr_kflags & NKR_PENDINTR);
        int interrupts = !(kring->nr_kflags & NKR_NOINTR);

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
        struct virtqueue *vq = rxq->vtnrx_vq;

        /*
         * First part: import newly received packets.
         * Only accept our own buffers (matching the token); nothing else
         * should ever show up. The hwtail should never overrun hwcur,
         * because we publish only N-1 receive buffers (and not N).
         * In any case we must not leave this routine with the interrupts
         * disabled, pending packets in the VQ and hwtail == (hwcur - 1),
         * otherwise the pending packets could stall.
         */
        if (netmap_no_pendintr || force_update) {
                uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
                void *token;

                vtnet_rxq_disable_intr(rxq);

                nm_i = kring->nr_hwtail;
                for (;;) {
                        int len;
                        token = virtqueue_dequeue(vq, &len);
                        if (token == NULL) {
                                /*
                                 * Enable the interrupts again and double-check
                                 * for more work. We can go on until we win the
                                 * race condition, since we are not replenishing
                                 * in the meanwhile, and thus we will process at
                                 * most N-1 slots.
                                 */
                                if (interrupts && vtnet_rxq_enable_intr(rxq)) {
                                        vtnet_rxq_disable_intr(rxq);
                                        continue;
                                }
                                break;
                        }
                        if (unlikely(token != (void *)rxq)) {
                                nm_prerr("BUG: RX token mismatch");
                        } else {
                                if (nm_i == hwtail_lim) {
                                        KASSERT(false, ("hwtail would "
                                            "overrun hwcur"));
                                }

                                /* Skip the virtio-net header. */
                                len -= sc->vtnet_hdr_size;
                                if (unlikely(len < 0)) {
                                        nm_prlim(1, "Truncated virtio-net "
                                            "header, missing %d bytes", -len);
                                        len = 0;
                                }
                                ring->slot[nm_i].len = len;
                                ring->slot[nm_i].flags = 0;
                                nm_i = nm_next(nm_i, lim);
                        }
                }
                kring->nr_hwtail = nm_i;
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        /*
         * Second part: skip past packets that userspace has released.
         */
        nm_i = kring->nr_hwcur; /* netmap ring index */
        if (nm_i != head) {
                int released;
                int error;
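
                /*
                 * Count the slots between nr_hwcur and head, modulo the
                 * ring size: these are the buffers that userspace has
                 * released and that must be republished to the host.
                 */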
                released = head - nm_i;
                if (released < 0)
                        released += kring->nkr_num_slots;
                error = vtnet_netmap_kring_refill(kring, released);
                if (error) {
                        nm_prerr("Failed to replenish RX VQ with %d sgs",
                            released);
                        return error;
                }
                kring->nr_hwcur = head;
                virtqueue_notify(vq);
        }

        nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead,
            kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail);

        return 0;
}

/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
        struct vtnet_softc *sc = na->ifp->if_softc;
        int i;

        for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
                struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
                struct vtnet_txq *txq = &sc->vtnet_txqs[i];
                struct virtqueue *txvq = txq->vtntx_vq;

                if (state) {
                        vtnet_rxq_enable_intr(rxq);
                        virtqueue_enable_intr(txvq);
                } else {
                        vtnet_rxq_disable_intr(rxq);
                        virtqueue_disable_intr(txvq);
                }
        }
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
        int div;

        /* We need to prepend a virtio-net header to each netmap buffer to be
         * transmitted, and therefore we call virtqueue_enqueue() with a
         * 2-element sglist.
         * TX virtqueues use indirect descriptors if the feature was negotiated
         * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
         * descriptors, a single virtio descriptor is sufficient to reference
         * each TX sglist. Without them, we need two separate virtio
         * descriptors for each TX sglist. We therefore compute the number of
         * netmap TX slots according to these assumptions.
         */
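        /*
         * Example: a 256-entry TX virtqueue provides 256 netmap TX slots
         * when indirect descriptors are in use, and 128 otherwise.
         */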
        if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
                div = 1;
        else
                div = 2;

        return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
        int div;

        /* We need to prepend a virtio-net header to each netmap buffer to be
         * received, and therefore we call virtqueue_enqueue() with a
         * 2-element sglist.
         * RX virtqueues use indirect descriptors if the feature was negotiated
         * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
         * descriptors, a single virtio descriptor is sufficient to reference
         * each RX sglist. Without them, we need two separate virtio
         * descriptors for each RX sglist. We therefore compute the number of
         * netmap RX slots according to these assumptions.
         */
        if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
                div = 1;
        else
                div = 2;

        return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}
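
/*
 * Called by the netmap core to pick up the current configuration; note
 * that the active (rather than the maximum) number of virtqueue pairs
 * is reported here.
 */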
static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
        struct vtnet_softc *sc = na->ifp->if_softc;

        info->num_tx_rings = sc->vtnet_act_vq_pairs;
        info->num_rx_rings = sc->vtnet_act_vq_pairs;
        info->num_tx_descs = vtnet_netmap_tx_slots(sc);
        info->num_rx_descs = vtnet_netmap_rx_slots(sc);
        info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

        return 0;
}
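
/*
 * Build the netmap_adapter description for this device and register it
 * with the netmap core; expected to be called once from the driver
 * attach path.
 */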
static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
        struct netmap_adapter na;

        bzero(&na, sizeof(na));

        na.ifp = sc->vtnet_ifp;
        na.na_flags = NAF_OFFSETS;
        na.num_tx_desc = vtnet_netmap_tx_slots(sc);
        na.num_rx_desc = vtnet_netmap_rx_slots(sc);
        na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
        na.rx_buf_maxsize = 0;
        na.nm_register = vtnet_netmap_reg;
        na.nm_txsync = vtnet_netmap_txsync;
        na.nm_rxsync = vtnet_netmap_rxsync;
        na.nm_intr = vtnet_netmap_intr;
        na.nm_config = vtnet_netmap_config;

        netmap_attach(&na);

        nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
            na.num_tx_rings, na.num_tx_desc,
            na.num_rx_rings, na.num_rx_desc);
}
/* end of file */