/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018
 * Netflix Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/netinet/tcp_log_buf.c 332378 2018-04-10 15:51:37Z jtl $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_bucket_zone, tcp_log_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW, 0, "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, TCP_LOG_STATE_TAIL,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, false,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)
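
/*
 * An illustrative sketch (not compiled) of the refcount dance described in
 * steps (a)-(f) above, for a hypothetical caller that holds a locked INP
 * 'inp' whose tcpcb points at bucket 'tlb':
 */
#if 0
	TCPID_BUCKET_REF(tlb);		/* (b) pin the bucket */
	INP_WUNLOCK(inp);		/* (c) drop the INP lock */
	TCPID_BUCKET_LOCK(tlb);		/* (d) lock the bucket */
	INP_WLOCK(inp);			/* (e) re-acquire the INP lock */
	TCPID_BUCKET_UNREF(tlb);	/* (f) unpin; bucket may now go away */
	/*
	 * The real code performs step (f) via tcp_log_unref_bucket() so the
	 * bucket is freed (with the tree write-locked) on the last release.
	 */
#endif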

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char		tlb_id[TCP_LOG_ID_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head tlb_head;
	struct mtx	tlb_mtx;
	volatile u_int	tlb_refcnt;
};
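
/*
 * For example (hypothetical, not compiled), a bare ID string can stand in
 * for a bucket when searching the tree, exactly as tcp_log_set_id() does:
 */
#if 0
	struct tcp_log_id_bucket *match;

	match = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
	    (struct tcp_log_id_bucket *)"some-log-id");
#endif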

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t	tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb	*tln_inp;
	struct tcpcb	*tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int		tln_count;
	volatile int	tln_closed;
	uint8_t		tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_auto_ratio &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}
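
/*
 * Worked example: with tcp_log_auto_ratio == 100, the shared counter makes
 * this return true for every 100th check system-wide (counter values 0,
 * 100, 200, ...), regardless of which connection performs the check.
 */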

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	uma_zfree(tcp_log_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {				\
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {		\
		rv = ECONNRESET;					\
		cleanup;						\
		goto done;						\
	}								\
	tp = intotcpcb(inp);						\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)
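
/*
 * RECHECK_INP() assumes local variables 'inp', 'rv', and 'tp' plus a 'done'
 * label in the enclosing function. A minimal (hypothetical, not compiled)
 * caller looks like this:
 */
#if 0
	INP_WLOCK(inp);
	RECHECK_INP();	/* on a dead INP: rv = ECONNRESET, goto done */
	/* ... inp is live and tp is valid here ... */
done:
	INP_WUNLOCK(inp);
	return (rv);
#endif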

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef NETFLIX
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp;
	int tree_locked, rv;
	bool bucket_locked;

	tlb = NULL;
	tln = NULL;
	inp = tp->t_inpcb;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);

	/* See if the ID is unchanged. */
	if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0)) {
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_node_zone, M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember that tlb_id is the first member of (struct
		 * tcp_log_id_bucket), so we can safely cast the id to that
		 * type for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we have
			 * correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {				\
	TCPID_BUCKET_LOCK_DESTROY(tlb);			\
	uma_zfree(tcp_log_bucket_zone, tlb);		\
	bucket_locked = false;				\
	tlb = NULL;					\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_node_zone, tln);
	return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}
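
/*
 * A hypothetical caller (not compiled): the destination buffer must be at
 * least TCP_LOG_ID_LEN bytes, and the INP must be locked around the call.
 */
#if 0
	char id[TCP_LOG_ID_LEN];

	INP_WLOCK(tp->t_inpcb);
	if (tcp_log_get_id(tp, id) > 0)
		printf("tcpcb %p carries log ID %s\n", tp, id);
	INP_WUNLOCK(tp->t_inpcb);
#endif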

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{
	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_bucket_zone = uma_zcreate("tcp_log_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_node_zone = uma_zcreate("tcp_log_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_selectauto()) {
		tp->t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}

/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * ourselves?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	/* We're done. */
	TCPLOG_EXPIREQ_UNLOCK();
	return;
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
	if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;

	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared. Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch (tp->t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == tp->t_inpcb,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
		    __func__, tln->tln_inp, tp->t_inpcb));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(tp->t_inpcb);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messy-ness here.)
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */

struct tcp_log_buffer *
tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
	    __func__, func, line));

	INP_WLOCK_ASSERT(tp->t_inpcb);

	KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->t_logstate (%d)", __func__,
	    tp->t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tcp_log_session_limit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}
		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		} else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return (NULL);
		} else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return (NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		}

		/*
		 * Get a buffer to reuse. If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return (NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

	/* Basic entries. */
	if (itv == NULL)
		getmicrouptime(&log_buf->tlb_tv);
	else
		memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
	log_buf->tlb_ticks = ticks;
	log_buf->tlb_sn = logsn;
	log_buf->tlb_stackid = tp->t_fb->tfb_id;
	log_buf->tlb_eventid = eventid;
	log_buf->tlb_eventflags = 0;
	log_buf->tlb_errno = errornum;

	/* Socket buffers */
	if (rxbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
		log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
		log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
		log_buf->tlb_rxbuf.tls_sb_spare = 0;
	}
	if (txbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
		log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
		log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
		log_buf->tlb_txbuf.tls_sb_spare = 0;
	}
	/* Copy values from tp to the log entry. */
#define	COPY_STAT(f)	log_buf->tlb_ ## f = tp->f
#define	COPY_STAT_T(f)	log_buf->tlb_ ## f = tp->t_ ## f
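	/*
	 * For example, COPY_STAT_T(state) expands to
	 * "log_buf->tlb_state = tp->t_state;" and COPY_STAT(snd_una)
	 * expands to "log_buf->tlb_snd_una = tp->snd_una;".
	 */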
	COPY_STAT_T(state);
	COPY_STAT_T(starttime);
	COPY_STAT(iss);
	COPY_STAT_T(flags);
	COPY_STAT(snd_una);
	COPY_STAT(snd_max);
	COPY_STAT(snd_cwnd);
	COPY_STAT(snd_nxt);
	COPY_STAT(snd_recover);
	COPY_STAT(snd_wnd);
	COPY_STAT(snd_ssthresh);
	COPY_STAT_T(srtt);
	COPY_STAT_T(rttvar);
	COPY_STAT(rcv_up);
	COPY_STAT(rcv_adv);
	COPY_STAT(rcv_nxt);
	COPY_STAT(sack_newdata);
	COPY_STAT(rcv_wnd);
	COPY_STAT_T(dupacks);
	COPY_STAT_T(segqlen);
	COPY_STAT(snd_numholes);
	COPY_STAT(snd_scale);
	COPY_STAT(rcv_scale);
#undef COPY_STAT
#undef COPY_STAT_T
	log_buf->tlb_flex1 = 0;
	log_buf->tlb_flex2 = 0;
	/* Copy stack-specific info. */
	if (stackinfo != NULL) {
		memcpy(&log_buf->tlb_stackinfo, stackinfo,
		    sizeof(log_buf->tlb_stackinfo));
		log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
	}

	/* The packet */
	log_buf->tlb_len = len;
	if (th) {
		int optlen;

		log_buf->tlb_eventflags |= TLB_FLAG_HDR;
		log_buf->tlb_th = *th;
		if (th_hostorder)
			tcp_fields_to_net(&log_buf->tlb_th);
		optlen = (th->th_off << 2) - sizeof (struct tcphdr);
		if (optlen > 0)
			memcpy(log_buf->tlb_opts, th + 1, optlen);
	}

	/* Verbose information */
	if (func != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
		if (output_caller != NULL)
			strlcpy(log_verbose->tlv_snd_frm, output_caller,
			    TCP_FUNC_LEN);
		else
			*log_verbose->tlv_snd_frm = 0;
		strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
		log_verbose->tlv_trace_line = line;
	}

	/* Insert the new log at the tail. */
	STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
	tcp_log_entry_refcnt_add(log_entry);
	return (log_buf);
}
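
/*
 * A hypothetical call site (not compiled): the TCP_LOG_EVENT() wrapper in
 * tcp_log_buf.h checks t_logstate and tcp_log_verbose before calling
 * tcp_log_event_(). Names such as 'so' and 'tlen' are illustrative only.
 */
#if 0
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	    tlen, NULL, true);
#endif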

/*
 * Change the logging state for a TCPCB. Returns 0 on success or an
 * error code on failure.
 */
int
tcp_log_state_change(struct tcpcb *tp, int state)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	switch (state) {
	case TCP_LOG_STATE_CLEAR:
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		/* Fall through */

	case TCP_LOG_STATE_OFF:
		tp->t_logstate = TCP_LOG_STATE_OFF;
		break;

	case TCP_LOG_STATE_TAIL:
	case TCP_LOG_STATE_HEAD:
	case TCP_LOG_STATE_CONTINUAL:
	case TCP_LOG_STATE_HEAD_AUTO:
	case TCP_LOG_STATE_TAIL_AUTO:
		tp->t_logstate = state;
		break;

	default:
		return (EINVAL);
	}

	tp->t_flags2 &= ~(TF2_LOG_AUTO);

	return (0);
}

/* If tcp_drain() is called, flush half the log entries. */
void
tcp_log_drain(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry, *next;
	int target, skip;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if ((target = tp->t_lognum / 2) == 0)
		return;

	/*
	 * If we are logging the "head" packets, we want to discard
	 * from the tail of the queue. Otherwise, we want to discard
	 * from the head.
	 */
	if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
		skip = tp->t_lognum - target;
		STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
			if (!--skip)
				break;
		KASSERT(log_entry != NULL,
		    ("%s: skipped through all entries!", __func__));
		if (log_entry == NULL)
			return;
		while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
			STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
			tcp_log_entry_refcnt_rem(next);
			tcp_log_remove_log_cleanup(tp, next);
#ifdef INVARIANTS
			target--;
#endif
		}
		KASSERT(target == 0,
		    ("%s: After removing from tail, target was %d", __func__,
		    target));
	} else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
	} else {
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
		    target--)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(target <= 0,
		    ("%s: After removing from head, target was %d", __func__,
		    target));
		KASSERT(tp->t_lognum > 0,
		    ("%s: After removing from head, tp->t_lognum was %d",
		    __func__, tp->t_lognum));
		KASSERT(log_entry != NULL,
		    ("%s: After removing from head, the tailq was empty",
		    __func__));
	}
}

static inline int
tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
{

	if (sopt->sopt_td != NULL)
		return (copyout(src, dst, len));
	bcopy(src, dst, len);
	return (0);
}
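
/*
 * Example: for a getsockopt() request from userland, sopt->sopt_td is the
 * requesting thread, so the data goes through copyout(); an in-kernel caller
 * that builds a struct sockopt with sopt_td == NULL gets a plain bcopy().
 */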

static int
tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
    struct tcp_log_buffer **end, int count)
{
	struct tcp_log_buffer *out_entry;
	struct tcp_log_mem *log_entry;
	size_t entrysize;
	int error;
#ifdef INVARIANTS
	int orig_count = count;
#endif

	/* Copy the data out. */
	error = 0;
	out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
	STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
		count--;
		KASSERT(count >= 0,
		    ("%s:%d: Exceeded expected count (%d) processing list %p",
		    __func__, __LINE__, orig_count, log_tailqp));

#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_copyout, 1);
#endif

		/*
		 * Skip copying out the header if it isn't present.
		 * Instead, copy out zeros (to ensure we don't leak info).
		 * TODO: Make sure we truly do zero everything we don't
		 * explicitly set.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
			entrysize = sizeof(struct tcp_log_buffer);
		else
			entrysize = offsetof(struct tcp_log_buffer, tlb_th);
		error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
		    entrysize);
		if (error)
			break;
		if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
			error = tcp_log_copyout(sopt, zerobuf,
			    ((uint8_t *)out_entry) + entrysize,
			    sizeof(struct tcp_log_buffer) - entrysize);
		}

		/*
		 * Copy out the verbose bit, if needed. Either way,
		 * increment the output pointer the correct amount.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
			error = tcp_log_copyout(sopt, &log_entry->tlm_v,
			    out_entry->tlb_verbose,
			    sizeof(struct tcp_log_verbose));
			if (error)
				break;
			out_entry = (struct tcp_log_buffer *)
			    (((uint8_t *) (out_entry + 1)) +
			    sizeof(struct tcp_log_verbose));
		} else
			out_entry++;
	}
	*end = out_entry;
	KASSERT(error || count == 0,
	    ("%s:%d: Less than expected count (%d) processing list %p"
	    " (%d remain)", __func__, __LINE__, orig_count,
	    log_tailqp, count));

	return (error);
}

/*
 * Copy out the buffer. Note that we do incremental copying, so
 * sooptcopyout() won't work. However, the goal is to produce the same
 * end result as if we copied in the entire user buffer, updated it,
 * and then used sooptcopyout() to copy it out.
 *
 * NOTE: This should be called with a write lock on the PCB; however,
 * the function will drop it after it extracts the data from the TCPCB.
 */
int
tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
{
	struct tcp_log_stailq log_tailq;
	struct tcp_log_mem *log_entry, *log_next;
	struct tcp_log_buffer *out_entry;
	struct inpcb *inp;
	size_t outsize, entrysize;
	int error, outnum;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	inp = tp->t_inpcb;

	/*
	 * Determine which log entries will fit in the buffer. As an
	 * optimization, skip this if all the entries will clearly fit
	 * in the buffer. (However, get an exact size if we are using
	 * INVARIANTS.)
	 */
#ifndef INVARIANTS
	if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
		log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
		log_next = NULL;
		outsize = 0;
		outnum = tp->t_lognum;
	} else {
#endif
		outsize = outnum = 0;
		log_entry = NULL;
		STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
			entrysize = sizeof(struct tcp_log_buffer);
			if (log_next->tlm_buf.tlb_eventflags &
			    TLB_FLAG_VERBOSE)
				entrysize += sizeof(struct tcp_log_verbose);
			if ((sopt->sopt_valsize - outsize) < entrysize)
				break;
			outsize += entrysize;
			outnum++;
			log_entry = log_next;
		}
		KASSERT(outsize <= sopt->sopt_valsize,
		    ("%s: calculated output size (%zu) greater than "
		    "available space (%zu)", __func__, outsize,
		    sopt->sopt_valsize));
#ifndef INVARIANTS
	}
#endif

	/*
	 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
	 * is NULL, silently skip the copy. However, in this case, we
	 * will leave the list alone and return. Functionally, this
	 * gives userspace a way to poll for an approximate buffer
	 * size they will need to get the log entries.
	 */
	if (sopt->sopt_val == NULL) {
		INP_WUNLOCK(inp);
		if (outsize == 0) {
			outsize = outnum * (sizeof(struct tcp_log_buffer) +
			    sizeof(struct tcp_log_verbose));
		}
		if (sopt->sopt_valsize > outsize)
			sopt->sopt_valsize = outsize;
		return (0);
	}

	/*
	 * Break apart the list. We'll save the ones we want to copy
	 * out locally and remove them from the TCPCB list. We can
	 * then drop the INPCB lock while we do the copyout.
	 *
	 * There are roughly three cases:
	 * 1. There was nothing to copy out. That's easy: drop the
	 *    lock and return.
	 * 2. We are copying out the entire list. Again, that's easy:
	 *    move the whole list.
	 * 3. We are copying out a partial list. That's harder. We
	 *    need to update the list book-keeping entries.
	 */
	if (log_entry != NULL && log_next == NULL) {
		/* Move entire list. */
		KASSERT(outnum == tp->t_lognum,
		    ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		log_tailq = tp->t_logs;
		tp->t_lognum = 0;
		STAILQ_INIT(&tp->t_logs);
	} else if (log_entry != NULL) {
		/* Move partial list. */
		KASSERT(outnum < tp->t_lognum,
		    ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
		STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
		KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
		    ("%s:%d: tp->t_logs is unexpectedly short "
		    "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
		    __func__, __LINE__, tp, &log_tailq, outnum,
		    tp->t_lognum));
		STAILQ_NEXT(log_entry, tlm_queue) = NULL;
		log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
		tp->t_lognum -= outnum;
	} else
		STAILQ_INIT(&log_tailq);

	/* Drop the PCB lock. */
	INP_WUNLOCK(inp);

	/* Copy the data out. */
	error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);

	if (error) {
		/* Restore the list. */
		INP_WLOCK(inp);
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) {
			tp = intotcpcb(inp);

			/* Merge the two lists. */
			STAILQ_CONCAT(&log_tailq, &tp->t_logs);
			tp->t_logs = log_tailq;
			tp->t_lognum += outnum;
		}
		INP_WUNLOCK(inp);
	} else {
		/* Sanity check entries. */
		KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
		    outsize, ("%s: Actual output size (%zu) != "
		    "calculated output size (%zu)", __func__,
		    (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
		    outsize));

		/* Free the entries we just copied out. */
		STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
			tcp_log_entry_refcnt_rem(log_entry);
			uma_zfree(tcp_log_zone, log_entry);
		}
	}

	sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
	    (caddr_t)sopt->sopt_val);
	return (error);
}

static void
tcp_log_free_queue(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;

	KASSERT(param != NULL, ("%s: called with NULL param", __func__));
	if (param == NULL)
		return;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);

	/* Free the buffer, if it is allocated. */
	if (entry->tldl_common.tldq_buf != NULL)
		free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);

	/* Free the queue entry. */
	free(entry, M_TCPLOGDEV);
}

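/*
 * Transform a queued set of log entries into the flat record the log
 * device exports: a struct tcp_log_header immediately followed by the
 * packed entries. This is installed as the tldq_xform callback on each
 * queue entry. On success, the log entries are freed and the malloc'd
 * record is returned; on failure, NULL is returned and the entries are
 * left for the destructor (tcp_log_free_queue) to reclaim.
 */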
static struct tcp_log_common_header *
tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_header *hdr;
	uint8_t *end;
	struct sockopt sopt;
	int error;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Take a worst-case guess at space needs. */
	sopt.sopt_valsize = sizeof(struct tcp_log_header) +
	    entry->tldl_count * (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose));
	hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
	if (hdr == NULL) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
#endif
		return (NULL);
	}
	sopt.sopt_val = hdr + 1;
	sopt.sopt_valsize -= sizeof(struct tcp_log_header);
	sopt.sopt_td = NULL;

	error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
	    (struct tcp_log_buffer **)&end, entry->tldl_count);
	if (error) {
		free(hdr, M_TCPLOGDEV);
		return (NULL);
	}

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
	entry->tldl_count = 0;

	memset(hdr, 0, sizeof(struct tcp_log_header));
	hdr->tlh_version = TCP_LOG_BUF_VER;
	hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
	hdr->tlh_length = end - (uint8_t *)hdr;
	hdr->tlh_ie = entry->tldl_ie;
	hdr->tlh_af = entry->tldl_af;
	getboottime(&hdr->tlh_offset);
	strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
	strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
	return ((struct tcp_log_common_header *)hdr);
}

/*
 * Queue the tcpcb's log buffer for transmission via the log buffer facility.
 *
 * NOTE: This should be called with a write lock on the PCB.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the INP lock if it needs to do so.
 *
 * If force is false, this will only dump auto-logged sessions if
 * tcp_log_auto_all is true or if there is a log ID defined for the session.
 */
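/*
 * For example, a caller that holds the INP write lock might dump a
 * session's trace when it hits some terminal condition (a sketch; the
 * trigger and reason string are illustrative only):
 *
 *	INP_WLOCK(inp);
 *	tp = intotcpcb(inp);
 *	if (retransmit_storm_detected(tp))	// hypothetical predicate
 *		(void)tcp_log_dump_tp_logbuf(tp, "retransmit storm",
 *		    M_NOWAIT, false);
 *	INP_WUNLOCK(inp);
 */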
int
tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
{
	struct tcp_log_dev_log_queue *entry;
	struct inpcb *inp;
#ifdef TCPLOG_DEBUG_COUNTERS
	int num_entries;
#endif

	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);

	/* If there are no log entries, there is nothing to do. */
	if (tp->t_lognum == 0)
		return (0);

	/* Check for a log ID. */
	if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
	    !tcp_log_auto_all && !force) {
		struct tcp_log_mem *log_entry;

		/*
		 * We needed a log ID and none was found. Free the log entries
		 * and return success. Also, cancel further logging. If the
		 * session doesn't have a log ID by now, we'll assume it isn't
		 * going to get one.
		 */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
		tp->t_logstate = TCP_LOG_STATE_OFF;
		return (0);
	}

	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT)) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail3, 1);
#endif
		return (ENOBUFS);
	}
	if (entry == NULL) {
		INP_WUNLOCK(inp);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		INP_WLOCK(inp);
		/*
		 * Note that this check is slightly overly-restrictive in
		 * that the TCB can survive either of these events.
		 * However, there is currently not a good way to ensure
		 * that is the case. So, if we hit this M_WAITOK path, we
		 * may end up dropping some entries. That seems like a
		 * small price to pay for safety.
		 */
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			free(entry, M_TCPLOGDEV);
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail2, 1);
#endif
			return (ECONNRESET);
		}
		tp = intotcpcb(inp);
		if (tp->t_lognum == 0) {
			free(entry, M_TCPLOGDEV);
			return (0);
		}
	}

	/* Fill in the unique parts of the queue entry. */
	if (tp->t_lib != NULL)
		strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
	else
		strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		entry->tldl_af = AF_INET6;
	else
		entry->tldl_af = AF_INET;
	entry->tldl_entries = tp->t_logs;
	entry->tldl_count = tp->t_lognum;

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Clear the log data from the TCPCB. */
#ifdef TCPLOG_DEBUG_COUNTERS
	num_entries = tp->t_lognum;
#endif
	tp->t_lognum = 0;
	STAILQ_INIT(&tp->t_logs);

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail1, num_entries);
	} else {
		counter_u64_add(tcp_log_queued, num_entries);
#endif
	}
	return (0);
}

/*
 * Queue the log_id_node's log buffers for transmission via the log buffer
 * facility.
 *
 * NOTE: This should be called with the bucket locked and referenced.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the bucket lock if it needs to do so. (The caller must
 * ensure that the tln is no longer on any lists so no one else will mess
 * with this while the lock is dropped!)
 */
static int
tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_id_bucket *tlb;

	tlb = tln->tln_bucket;
	TCPID_BUCKET_LOCK_ASSERT(tlb);
	KASSERT(tlb->tlb_refcnt > 0,
	    ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
	    __func__, __LINE__, tln, tlb));
	KASSERT(tln->tln_closed,
	    ("%s:%d: Called for node with tln_closed==false (tln=%p)",
	    __func__, __LINE__, tln));

	/* If there are no log entries, there is nothing to do. */
	if (tln->tln_count == 0)
		return (0);

	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT))
		return (ENOBUFS);
	if (entry == NULL) {
		TCPID_BUCKET_UNLOCK(tlb);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		TCPID_BUCKET_LOCK(tlb);
	}

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Fill in the unique parts of the queue entry. */
	strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = tln->tln_ie;
	entry->tldl_entries = tln->tln_entries;
	entry->tldl_count = tln->tln_count;
	entry->tldl_af = tln->tln_af;

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);

	return (0);
}

/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked bucket; however, the function
 * will drop the lock.
 */
#define	LOCAL_SAVE	10
static void
tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
{
	struct tcp_log_id_node local_entries[LOCAL_SAVE];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
	int i, num_local_entries, tree_locked;
	bool expireq_locked;

	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Take a reference on the bucket to keep it from disappearing until
	 * we are done.
	 */
	TCPID_BUCKET_REF(tlb);

	/*
	 * We'll try to create these without dropping locks. However, we
	 * might very well need to drop locks to get memory. If that's the
	 * case, we'll save up to 10 on the stack, and sacrifice the rest.
	 * (Otherwise, we need to worry about finding our place again in a
	 * potentially changed list. It just doesn't seem worth the trouble
	 * to do that.)
	 */
	expireq_locked = false;
	num_local_entries = 0;
	prev_tln = NULL;
	tree_locked = TREE_UNLOCKED;
	SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
		/*
		 * If this isn't associated with a TCPCB, we can pull it off
		 * the list now. We need to be careful that the expire timer
		 * hasn't already taken ownership (tln_expiretime == SBT_MAX).
		 * If so, we let the expire timer code free the data.
		 */
		if (cur_tln->tln_closed) {
no_inp:
			/*
			 * Get the expireq lock so we can get a consistent
			 * read of tln_expiretime and so we can remove this
			 * from the expireq.
			 */
			if (!expireq_locked) {
				TCPLOG_EXPIREQ_LOCK();
				expireq_locked = true;
			}

			/*
			 * We ignore entries with tln_expiretime == SBT_MAX.
			 * The expire timer code already owns those.
			 */
			KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
			    ("%s:%d: node on the expire queue without positive "
			    "expire time", __func__, __LINE__));
			if (cur_tln->tln_expiretime == SBT_MAX) {
				prev_tln = cur_tln;
				continue;
			}

			/* Remove the entry from the expireq. */
			STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
			    tcp_log_id_node, tln_expireq);

			/* Remove the entry from the bucket. */
			if (prev_tln != NULL)
				SLIST_REMOVE_AFTER(prev_tln, tln_list);
			else
				SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);

			/*
			 * Drop the INP and bucket reference counts. Due to
			 * lock-ordering rules, we need to drop the expire
			 * queue lock.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;

			/* Drop the INP reference. */
			INP_WLOCK(cur_tln->tln_inp);
			if (!in_pcbrele_wlocked(cur_tln->tln_inp))
				INP_WUNLOCK(cur_tln->tln_inp);

			if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
#ifdef INVARIANTS
				panic("%s: Bucket refcount unexpectedly 0.",
				    __func__);
#endif
				/*
				 * Recover as best we can: free the entry we
				 * own.
				 */
				tcp_log_free_entries(&cur_tln->tln_entries,
				    &cur_tln->tln_count);
				uma_zfree(tcp_log_node_zone, cur_tln);
				goto done;
			}

			if (tcp_log_dump_node_logbuf(cur_tln, reason,
			    M_NOWAIT)) {
				/*
				 * If we have space, save the entries locally.
				 * Otherwise, free them.
				 */
				if (num_local_entries < LOCAL_SAVE) {
					local_entries[num_local_entries] =
					    *cur_tln;
					num_local_entries++;
				} else {
					tcp_log_free_entries(
					    &cur_tln->tln_entries,
					    &cur_tln->tln_count);
				}
			}

			/* No matter what, we are done with the node now. */
			uma_zfree(tcp_log_node_zone, cur_tln);

			/*
			 * Because we removed this entry from the list, prev_tln
			 * (which tracks the previous entry still on the tlb
			 * list) remains unchanged.
			 */
			continue;
		}

		/*
		 * If we get to this point, the session data is still held in
		 * the TCPCB. So, we need to pull the data out of that.
		 *
		 * We will need to drop the expireq lock so we can lock the INP.
		 * We can then try to extract the data the "easy" way. If that
		 * fails, we'll save the log entries for later.
		 */
		if (expireq_locked) {
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;
		}

		/* Lock the INP and then re-check the state. */
		inp = cur_tln->tln_inp;
		INP_WLOCK(inp);
		/*
		 * If we caught this while it was transitioning, the data
		 * might have moved from the TCPCB to the tln (signified by
		 * setting tln_closed to true). If so, treat this like an
		 * inactive connection.
		 */
		if (cur_tln->tln_closed) {
			/*
			 * It looks like we may have caught this connection
			 * while it was transitioning from active to inactive.
			 * Treat this like an inactive connection.
			 */
			INP_WUNLOCK(inp);
			goto no_inp;
		}

		/*
		 * Try to dump the data from the tp without dropping the lock.
		 * If this fails, try to save off the data locally.
		 */
		tp = cur_tln->tln_tp;
		if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
		    num_local_entries < LOCAL_SAVE) {
			tcp_log_move_tp_to_node(tp,
			    &local_entries[num_local_entries]);
			local_entries[num_local_entries].tln_closed = 1;
			KASSERT(local_entries[num_local_entries].tln_bucket ==
			    tlb, ("%s: %d: bucket mismatch for node %p",
			    __func__, __LINE__, cur_tln));
			num_local_entries++;
		}

		INP_WUNLOCK(inp);

		/*
		 * We are going to leave the current tln on the list. It will
		 * become the previous tln.
		 */
		prev_tln = cur_tln;
	}

	/* Drop our locks, if any. */
	KASSERT(tree_locked == TREE_UNLOCKED,
	    ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	}
	if (expireq_locked) {
		TCPLOG_EXPIREQ_UNLOCK();
		expireq_locked = false;
	}

	/*
	 * Try again for any saved entries. tcp_log_dump_node_logbuf() is
	 * guaranteed to free the log entries within the node. And, since
	 * the node itself is on our stack, we don't need to free it.
	 */
	for (i = 0; i < num_local_entries; i++)
		tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);

	/* Drop our reference. */
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

done:
	/* Drop our locks, if any. */
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		break;
	}
	if (expireq_locked)
		TCPLOG_EXPIREQ_UNLOCK();
}
#undef LOCAL_SAVE

/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked INP; however, the function
 * will drop the lock.
 */
void
tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
{
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	/* Figure out our bucket and lock it. */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	tlb = tp->t_lib;
	if (tlb == NULL) {
		/*
		 * No bucket; treat this like a request to dump a single
		 * session's traces.
		 */
		(void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
		INP_WUNLOCK(tp->t_inpcb);
		return;
	}
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(tp->t_inpcb);
	TCPID_BUCKET_LOCK(tlb);

	/* If we are the last reference, we have nothing more to do here. */
	tree_locked = TREE_UNLOCKED;
	if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
		switch (tree_locked) {
		case TREE_WLOCKED:
			TCPID_TREE_WUNLOCK();
			break;
		case TREE_RLOCKED:
			TCPID_TREE_RUNLOCK();
			break;
		}
		return;
	}

	/* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
	tcp_log_dumpbucketlogs(tlb, reason);
}

/*
 * Mark the end of a flow with the current stack. A stack can add
 * stack-specific info to this trace event by overriding this
 * function (see bbr_log_flowend() for example).
 */
void
tcp_log_flowend(struct tcpcb *tp)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		struct socket *so = tp->t_inpcb->inp_socket;
		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
		    TCP_LOG_FLOWEND, 0, 0, NULL, false);
	}
}

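/*
 * A stack that overrides the flow-end event might record its own state
 * before emitting the common trace. A minimal sketch (illustrative only;
 * bbr_log_flowend() is the in-tree example, and the stack-private logging
 * shown here is assumed, not prescribed):
 *
 *	static void
 *	example_log_flowend(struct tcpcb *tp)
 *	{
 *		// Log stack-private state for this flow first...
 *		// ...then emit the common flow-end event.
 *		tcp_log_flowend(tp);
 *	}
 */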