1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2013 Chelsio Communications, Inc.
5 * All rights reserved.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include "opt_inet.h"
34 #include "opt_inet6.h"
35
36 #include <sys/param.h>
37 #include <sys/eventhandler.h>
38 #include <sys/lock.h>
39 #include <sys/types.h>
40 #include <sys/mbuf.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sx.h>
44 #include <net/bpf.h>
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/if_clone.h>
48 #include <net/if_types.h>
49
50 #include "common/common.h"
51 #include "common/t4_msg.h"
52 #include "common/t4_regs.h"
53 #include "t4_ioctl.h"
54
55 /*
56 * Locking notes
57 * =============
58 *
59 * An interface cloner is registered during mod_load and it can be used to
60 * create or destroy the tracing ifnet for an adapter at any time. It is
61 * possible for the cloned interface to outlive the adapter (adapter disappears
62 * in t4_detach but the tracing ifnet may live till mod_unload when removal of
63 * the cloner finally destroys any remaining cloned interfaces). When tracing
64 * filters are active, this ifnet is also receiving data. There are potential
65 * bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
66 * cxgbe_detach/t4_detach, mod_unload.
67 *
68 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op. The
69 * iq is destroyed inside a synch op too (and sc->traceq updated).
70 * b) The cloner looks for an adapter that matches the name of the ifnet it's
71 * been asked to create, starts a synch op on that adapter, and proceeds only
72 * if the adapter has a tracing iq.
73 * c) The cloned ifnet and the adapter are coupled to each other via
74 * ifp->if_softc and sc->ifp. These can be modified only with the global
75 * t4_trace_lock sx as well as the sc->ifp_lock mutex held. Holding either
76 * of these will prevent any change.
77 *
78 * The order in which all the locks involved should be acquired are:
79 * t4_list_lock
80 * adapter lock
81 * (begin synch op and let go of the above two)
82 * t4_trace_lock
83 * sc->ifp_lock
84 */
85
86 static struct sx t4_trace_lock;
87 static const char *t4_cloner_name = "tXnex";
88 static struct if_clone *t4_cloner;
89
90 /* tracer ifnet routines. mostly no-ops. */
91 static void tracer_init(void *);
92 static int tracer_ioctl(struct ifnet *, unsigned long, caddr_t);
93 static int tracer_transmit(struct ifnet *, struct mbuf *);
94 static void tracer_qflush(struct ifnet *);
95 static int tracer_media_change(struct ifnet *);
96 static void tracer_media_status(struct ifnet *, struct ifmediareq *);
97
/*
 * Match-by-name request/response, passed to t4_iterate via match_name.
 */
struct match_rr {
	const char *name;	/* in: device nameunit to look for, e.g. "t4nex0" */
	int lock;		/* in: set to 1 to have sc returned with a synch op begun on it. */
	struct adapter *sc;	/* out: the matching adapter, NULL if no match */
	int rc;			/* out: 0 on success, errno otherwise (caller preloads ENOENT) */
};
105
106 static void
107 match_name(struct adapter *sc, void *arg)
108 {
109 struct match_rr *mrr = arg;
110
111 if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
112 return;
113
114 KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
115 __func__, mrr->sc, sc, mrr->name));
116
117 mrr->sc = sc;
118 if (mrr->lock)
119 mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
120 else
121 mrr->rc = 0;
122 }
123
/*
 * Cloner match routine: accept only names of the form t4nex<unit>,
 * t5nex<unit>, or t6nex<unit>, where <unit> begins with a decimal digit.
 * Returns 1 on a match, 0 otherwise.
 */
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0 &&
	    strncmp(name, "t6nex", 5) != 0)
		return (0);
	/* Bug fix: was an empty character constant (''), which is invalid C. */
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}
136
137 static int
138 t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
139 {
140 struct match_rr mrr;
141 struct adapter *sc;
142 struct ifnet *ifp;
143 int rc, unit;
144 const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
145
146 mrr.name = name;
147 mrr.lock = 1;
148 mrr.sc = NULL;
149 mrr.rc = ENOENT;
150 t4_iterate(match_name, &mrr);
151
152 if (mrr.rc != 0)
153 return (mrr.rc);
154 sc = mrr.sc;
155
156 KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
157 __func__, name));
158 ASSERT_SYNCHRONIZED_OP(sc);
159
160 sx_xlock(&t4_trace_lock);
161
162 if (sc->ifp != NULL) {
163 rc = EEXIST;
164 goto done;
165 }
166 if (sc->traceq < 0) {
167 rc = EAGAIN;
168 goto done;
169 }
170
171
172 unit = -1;
173 rc = ifc_alloc_unit(ifc, &unit);
174 if (rc != 0)
175 goto done;
176
177 ifp = if_alloc(IFT_ETHER);
178 if (ifp == NULL) {
179 ifc_free_unit(ifc, unit);
180 rc = ENOMEM;
181 goto done;
182 }
183
184 /* Note that if_xname is not <if_dname><if_dunit>. */
185 strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
186 ifp->if_dname = t4_cloner_name;
187 ifp->if_dunit = unit;
188 ifp->if_init = tracer_init;
189 ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
190 ifp->if_ioctl = tracer_ioctl;
191 ifp->if_transmit = tracer_transmit;
192 ifp->if_qflush = tracer_qflush;
193 ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
194 ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
195 tracer_media_status);
196 ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
197 ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
198 ether_ifattach(ifp, lla);
199
200 mtx_lock(&sc->ifp_lock);
201 ifp->if_softc = sc;
202 sc->ifp = ifp;
203 mtx_unlock(&sc->ifp_lock);
204 done:
205 sx_xunlock(&t4_trace_lock);
206 end_synchronized_op(sc, 0);
207 return (rc);
208 }
209
/*
 * Cloner destroy routine: tear down a cloned tracing ifnet.  The ifnet may
 * already be decoupled from its adapter (if_softc == NULL) when the adapter
 * detached first; in that case only the ifnet itself is destroyed.
 */
static int
t4_cloner_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct adapter *sc;
	int unit = ifp->if_dunit;

	sx_xlock(&t4_trace_lock);
	sc = ifp->if_softc;
	if (sc != NULL) {
		/*
		 * Decouple the ifnet and the adapter.  Per the locking notes
		 * (item c), both t4_trace_lock and ifp_lock are held here.
		 */
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		ifp->if_softc = NULL;
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}
232
/*
 * Called at mod_load: set up the global tracer lock and register the
 * interface cloner that creates/destroys tracing ifnets.
 */
void
t4_tracer_modload(void)
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}
241
/*
 * Called at mod_unload: unregister the cloner and destroy the global lock.
 */
void
t4_tracer_modunload(void)
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces can not outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}
258
/*
 * Decouple the adapter from its tracing ifnet (if any) and release the media
 * state.  NOTE(review): presumably called from the adapter detach path so no
 * more trace packets are delivered to the ifnet — confirm against the caller.
 */
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		/* Both locks held while modifying the coupling (item c). */
		mtx_lock(&sc->ifp_lock);
		sc->ifp->if_softc = NULL;
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}
273
274 int
275 t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
276 {
277 int rc, i, enabled;
278 struct trace_params tp;
279
280 if (t->idx >= NTRACE) {
281 t->idx = 0xff;
282 t->enabled = 0;
283 t->valid = 0;
284 return (0);
285 }
286
287 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
288 "t4gett");
289 if (rc)
290 return (rc);
291
292 if (hw_off_limits(sc)) {
293 rc = ENXIO;
294 goto done;
295 }
296
297 for (i = t->idx; i < NTRACE; i++) {
298 if (isset(&sc->tracer_valid, t->idx)) {
299 t4_get_trace_filter(sc, &tp, i, &enabled);
300 t->idx = i;
301 t->enabled = enabled;
302 t->valid = 1;
303 memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
304 memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
305 t->tp.snap_len = tp.snap_len;
306 t->tp.min_len = tp.min_len;
307 t->tp.skip_ofst = tp.skip_ofst;
308 t->tp.skip_len = tp.skip_len;
309 t->tp.invert = tp.invert;
310
311 /* convert channel to port iff 0 <= port < 8. */
312 if (tp.port < 4)
313 t->tp.port = sc->chan_map[tp.port];
314 else if (tp.port < 8)
315 t->tp.port = sc->chan_map[tp.port - 4] + 4;
316 else
317 t->tp.port = tp.port;
318
319 goto done;
320 }
321 }
322
323 t->idx = 0xff;
324 t->enabled = 0;
325 t->valid = 0;
326 done:
327 end_synchronized_op(sc, LOCK_HELD);
328
329 return (rc);
330 }
331
/*
 * Install, enable, or disable the tracing filter at index t->idx.  If
 * t->valid is 0 and a filter was previously installed at the index, this is
 * an enable/disable-only operation (tpp == NULL); otherwise t->tp describes
 * a new filter.  Returns 0 on success, errno otherwise.
 */
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;	/* enable/disable only, keep filter */
		else
			rc = EINVAL;
		goto done;
	}

	/* Validate the user-supplied filter parameters. */
	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/*
	 * convert port to channel iff 0 <= port < 8.
	 * NOTE(review): for ports 8..19 (which pass validation above) tp.port
	 * is never assigned here and tp is a stack variable — looks like it
	 * may be used uninitialized; verify against upstream.
	 */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	}
	tpp = &tp;
done:
	/* rc == 0 here means tpp was set on one of the paths above. */
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				/* First enabled tracer: turn tracing on. */
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				/* Last enabled tracer gone: turn tracing off. */
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}
418
419 int
420 t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
421 {
422 struct adapter *sc = iq->adapter;
423 struct ifnet *ifp;
424
425 KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
426 rss->opcode));
427
428 mtx_lock(&sc->ifp_lock);
429 ifp = sc->ifp;
430 if (sc->ifp) {
431 m_adj(m, sizeof(struct cpl_trace_pkt));
432 m->m_pkthdr.rcvif = ifp;
433 ETHER_BPF_MTAP(ifp, m);
434 }
435 mtx_unlock(&sc->ifp_lock);
436 m_freem(m);
437
438 return (0);
439 }
440
441 int
442 t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
443 {
444 struct adapter *sc = iq->adapter;
445 struct ifnet *ifp;
446
447 KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
448 rss->opcode));
449
450 mtx_lock(&sc->ifp_lock);
451 ifp = sc->ifp;
452 if (ifp != NULL) {
453 m_adj(m, sizeof(struct cpl_t5_trace_pkt));
454 m->m_pkthdr.rcvif = ifp;
455 ETHER_BPF_MTAP(ifp, m);
456 }
457 mtx_unlock(&sc->ifp_lock);
458 m_freem(m);
459
460 return (0);
461 }
462
463
/* if_init method: the tracer ifnet needs no initialization. */
static void
tracer_init(void *arg)
{
}
470
471 static int
472 tracer_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
473 {
474 int rc = 0;
475 struct adapter *sc;
476 struct ifreq *ifr = (struct ifreq *)data;
477
478 switch (cmd) {
479 case SIOCSIFMTU:
480 case SIOCSIFFLAGS:
481 case SIOCADDMULTI:
482 case SIOCDELMULTI:
483 case SIOCSIFCAP:
484 break;
485 case SIOCSIFMEDIA:
486 case SIOCGIFMEDIA:
487 case SIOCGIFXMEDIA:
488 sx_xlock(&t4_trace_lock);
489 sc = ifp->if_softc;
490 if (sc == NULL)
491 rc = EIO;
492 else
493 rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
494 sx_xunlock(&t4_trace_lock);
495 break;
496 default:
497 rc = ether_ioctl(ifp, cmd, data);
498 }
499
500 return (rc);
501 }
502
/* if_transmit method: the tracer is receive-only, so drop everything. */
static int
tracer_transmit(struct ifnet *ifp, struct mbuf *m)
{
	m_freem(m);
	return (0);
}
510
/* if_qflush method: nothing is ever queued, so nothing to flush. */
static void
tracer_qflush(struct ifnet *ifp)
{
}
517
/* Media change callback: the tracer's media cannot be changed. */
static int
tracer_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}
524
525 static void
526 tracer_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
527 {
528
529 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
530
531 return;
532 }