/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: releng/8.4/sys/dev/netmap/if_re_netmap.h 231717 2012-02-14 22:49:34Z luigi $
 * $Id: if_re_netmap.h 10075 2011-12-25 22:55:48Z luigi $
 *
 * netmap support for if_re
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static int re_netmap_reg(struct ifnet *, int onoff);
static int re_netmap_txsync(struct ifnet *, u_int, int);
static int re_netmap_rxsync(struct ifnet *, u_int, int);
static void re_netmap_lock_wrapper(struct ifnet *, int, u_int);
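
/*
 * Build a struct netmap_adapter describing this NIC (ring sizes and
 * driver callbacks) and register it with the netmap subsystem.
 * (Added comment; the call site is presumably re_attach() in if_re.c,
 * once the descriptor rings have been allocated.)
 */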
static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.separate_locks = 0;
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_lock = re_netmap_lock_wrapper;
	na.nm_register = re_netmap_reg;
	netmap_attach(&na, 1);
}


/*
 * Wrapper to export the core lock to the generic netmap code.
 * This driver has a single lock, so the per-ring tx/rx lock
 * requests are invalid here.
 */
static void
re_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
	struct rl_softc *adapter = ifp->if_softc;

	switch (what) {
	case NETMAP_CORE_LOCK:
		RL_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		RL_UNLOCK(adapter);
		break;

	case NETMAP_TX_LOCK:
	case NETMAP_RX_LOCK:
	case NETMAP_TX_UNLOCK:
	case NETMAP_RX_UNLOCK:
		D("invalid lock call %d, no tx/rx locks here", what);
		break;
	}
}
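
/*
 * Hedged example (added; the signature matches the prototype above, but
 * the exact calling convention of the generic code is an assumption):
 * netmap brackets its critical sections with calls such as
 *
 *	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
 *	...
 *	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
 */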

/*
 * Support for netmap register/unregister. We are already under the core
 * lock. Only called on the first register or the last unregister.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit to restore it later */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
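		/* reached via "goto fail" above: restore normal operation */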
	fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return (error);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, l, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);

	/* Sync the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX move after the transmissions */
	/* record completed transmissions */
	for (n = 0, l = sc->rl_ldata.rl_tx_considx;
	    l != sc->rl_ldata.rl_tx_prodidx;
	    n++, l = RL_TX_DESC_NXT(sc, l)) {
		uint32_t cmdstat =
			le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat);
		if (cmdstat & RL_TDESC_STAT_OWN)
			break;
	}
	if (n > 0) {
		sc->rl_ldata.rl_tx_considx = l;
		sc->rl_ldata.rl_tx_free += n;
		kring->nr_hwavail += n;
	}

	/* update avail to what the hardware knows */
	ring->avail = kring->nr_hwavail;

	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		n = 0;
		l = sc->rl_ldata.rl_tx_prodidx;
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j];
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
			int cmd = slot->len | RL_TDESC_CMD_EOF |
			    RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					RL_UNLOCK(sc);
				// XXX what about prodidx ?
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				/* buffer has changed, unload and reload map */
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[l].tx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
			n++;
		}
		sc->rl_ldata.rl_tx_prodidx = l;
		kring->nr_hwcur = k;

		/* decrease avail by number of sent packets */
		ring->avail -= n;
		kring->nr_hwavail = ring->avail;

		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* start ? */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}
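
/*
 * Hedged usage sketch (added; illustrative only, not part of the driver.
 * It assumes the userspace netmap API of this period: NETMAP_TXRING(),
 * NETMAP_BUF(), NETMAP_RING_NEXT() and the NIOCTXSYNC ioctl; 'nifp',
 * 'fd', 'src', 'len' and 'todo' are placeholders). A process fills TX
 * slots, advances ring->cur, and the ioctl ends up in re_netmap_txsync():
 *
 *	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
 *
 *	while (todo > 0 && ring->avail > 0) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		memcpy(NETMAP_BUF(ring, slot->buf_idx), src, len);
 *		slot->len = len;
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *		ring->avail--;
 *		todo--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);
 */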

/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, l, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);
	/* XXX check sync modes */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * The device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * being cleared (all buffers could have it cleared). The easiest
	 * one is to limit the amount of data reported up to 'lim'.
	 */
	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
	j = l + kring->nkr_hwofs;
	if (j > lim)	/* wrap netmap index (added guard, mirroring the wrap logic used elsewhere in this file) */
		j -= lim + 1;
	for (n = kring->nr_hwavail; n < lim; n++) {
		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
		uint32_t total_len;

		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		/* XXX subtract crc */
		total_len = (total_len < 4) ? 0 : total_len - 4;
		kring->ring->slot[j].len = total_len;
		/* sync was in re_newbuf() */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD);
		j = (j == lim) ? 0 : j + 1;
		l = (l == lim) ? 0 : l + 1;
	}
	if (n != kring->nr_hwavail) {
		sc->rl_ldata.rl_rx_prodidx = l;
		sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
		kring->nr_hwavail = n;
	}

	/*
	 * Skip past packets that userspace has already processed,
	 * making their buffers available for reception again:
	 * advance nr_hwcur and issue a bus_dmamap_sync on the
	 * buffers so it is safe for the NIC to write to them.
	 * Also decrease nr_hwavail accordingly.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* userspace has read some packets. */
		n = 0;
		l = kring->nr_hwcur - kring->nkr_hwofs;
		if (l < 0)
			l += lim + 1;
		while (j != k) {
			struct netmap_slot *slot = ring->slot + j;
			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
			int cmd = na->buff_size | RL_RDESC_CMD_OWN;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			desc->rl_cmdstat = htole32(cmd);
			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[l].rx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
				rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
			n++;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;

		/* Flush the RX DMA ring */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail;
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}
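
/*
 * Hedged usage sketch (added; same assumptions as the TX example above,
 * plus NETMAP_RXRING() and the NIOCRXSYNC ioctl; consume() is a
 * placeholder for application processing). Userspace drains the slots
 * reported in ring->avail and returns them by advancing ring->cur,
 * which re_netmap_rxsync() turns back into NIC-owned descriptors:
 *
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *
 *	ioctl(fd, NIOCRXSYNC, NULL);	// or poll() with POLLIN
 *	while (ring->avail > 0) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		consume(NETMAP_BUF(ring, slot->buf_idx), slot->len);
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *		ring->avail--;
 *	}
 */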
347
348 /*
349 * Additional routines to init the tx and rx rings.
350 * In other drivers we do that inline in the main code.
351 */
static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i, n;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;
	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;
	n = sc->rl_ldata.rl_tx_desc_cnt;

	/* l points in the netmap ring, i points in the NIC ring */
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		struct netmap_kring *kring = &na->tx_rings[0];
		int l = i + kring->nkr_hwofs;

		if (l >= n)
			l -= n;

		addr = PNMB(slot + l, &paddr);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
			txd[i].tx_dmamap, addr);
	}
}
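
/*
 * Worked example (added, illustrative) for the index translation above:
 * with n = 256 and nkr_hwofs = 3, NIC descriptor i = 0 takes its buffer
 * from netmap slot l = 3, and i = 253..255 wrap around to slots l = 0..2.
 */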

static void
re_netmap_rx_init(struct rl_softc *sc)
{
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i, n;

	if (!slot)
		return;
	n = sc->rl_ldata.rl_rx_desc_cnt;
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		struct netmap_kring *kring = &na->rx_rings[0];
		int l = i + kring->nkr_hwofs;

		if (l >= n)
			l -= n;

		addr = PNMB(slot + l, &paddr);

		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		cmdstat = na->buff_size;
		if (i == n - 1)
			cmdstat |= RL_RDESC_CMD_EOR;
		/*
		 * Userspace knows that hwavail packets were ready before
		 * the reset, so we need to tell the NIC that the last
		 * hwavail descriptors of the ring are still owned by the
		 * driver.
		 */
		if (i < n - 1 - kring->nr_hwavail) // XXX + 1 ?
			cmdstat |= RL_RDESC_CMD_OWN;
		desc[i].rl_cmdstat = htole32(cmdstat);
	}
}
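
/*
 * Worked example (added, illustrative) for the OWN test above: with
 * n = 256 and nr_hwavail = 10, "i < 245" gives OWN to descriptors
 * 0..244 and leaves 245..255 (11 slots, i.e. hwavail + 1) with the
 * driver, which is what the "XXX + 1 ?" note is questioning.
 */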