1 /*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Berkeley Software Design Inc's name may not be used to endorse or
16 * promote products derived from this software without specific prior
17 * written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33 */
34
35 /*
36 * Implementation of the `witness' lock verifier. Originally implemented for
37 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
38 * classes in FreeBSD.
39 */
40
41 /*
42 * Main Entry: witness
43 * Pronunciation: 'wit-n&s
44 * Function: noun
45 * Etymology: Middle English witnesse, from Old English witnes knowledge,
46 * testimony, witness, from 2wit
47 * Date: before 12th century
48 * 1 : attestation of a fact or event : TESTIMONY
49 * 2 : one that gives evidence; specifically : one who testifies in
50 * a cause or before a judicial tribunal
51 * 3 : one asked to be present at a transaction so as to be able to
52 * testify to its having taken place
53 * 4 : one who has personal knowledge of something
54 * 5 a : something serving as evidence or proof : SIGN
55 * b : public affirmation by word or example of usually
56 * religious faith or conviction <the heroic witness to divine
57 * life -- Pilot>
58 * 6 capitalized : a member of the Jehovah's Witnesses
59 */
60
61 /*
62 * Special rules concerning Giant and lock orders:
63 *
64 * 1) Giant must be acquired before any other mutexes. Stated another way,
65 * no other mutex may be held when Giant is acquired.
66 *
67 * 2) Giant must be released when blocking on a sleepable lock.
68 *
69 * This rule is less obvious, but is a result of Giant providing the same
70 * semantics as spl(). Basically, when a thread sleeps, it must release
71 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
72 * 2).
73 *
74 * 3) Giant may be acquired before or after sleepable locks.
75 *
76 * This rule is also not quite as obvious. Giant may be acquired after
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock. The second
79 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
80 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
81 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
82 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute. Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
86
87 #include <sys/cdefs.h>
88 __FBSDID("$FreeBSD: releng/11.1/sys/kern/subr_witness.c 314063 2017-02-22 00:50:36Z markj $");
89
90 #include "opt_ddb.h"
91 #include "opt_hwpmc_hooks.h"
92 #include "opt_stack.h"
93 #include "opt_witness.h"
94
95 #include <sys/param.h>
96 #include <sys/bus.h>
97 #include <sys/kdb.h>
98 #include <sys/kernel.h>
99 #include <sys/ktr.h>
100 #include <sys/lock.h>
101 #include <sys/malloc.h>
102 #include <sys/mutex.h>
103 #include <sys/priv.h>
104 #include <sys/proc.h>
105 #include <sys/sbuf.h>
106 #include <sys/sched.h>
107 #include <sys/stack.h>
108 #include <sys/sysctl.h>
109 #include <sys/syslog.h>
110 #include <sys/systm.h>
111
112 #ifdef DDB
113 #include <ddb/ddb.h>
114 #endif
115
116 #include <machine/stdarg.h>
117
118 #if !defined(DDB) && !defined(STACK)
119 #error "DDB or STACK options are required for WITNESS"
120 #endif
121
122 /* Note that these traces do not work with KTR_ALQ. */
123 #if 0
124 #define KTR_WITNESS KTR_SUBSYS
125 #else
126 #define KTR_WITNESS 0
127 #endif
128
129 #define LI_RECURSEMASK 0x0000ffff /* Recursion depth of lock instance. */
130 #define LI_EXCLUSIVE 0x00010000 /* Exclusive lock instance. */
131 #define LI_NORELEASE 0x00020000 /* Lock not allowed to be released. */
132
133 /* Define this to check for blessed mutexes */
134 #undef BLESSING
135
136 #ifndef WITNESS_COUNT
137 #define WITNESS_COUNT 1536
138 #endif
139 #define WITNESS_HASH_SIZE 251 /* Prime, gives load factor < 2 */
140 #define WITNESS_PENDLIST (1024 + MAXCPU)
141
142 /* Allocate 256 KB of stack data space */
143 #define WITNESS_LO_DATA_COUNT 2048
144
145 /* Prime, gives load factor of ~2 at full load */
146 #define WITNESS_LO_HASH_SIZE 1021
147
148 /*
149 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
150 * will hold LOCK_NCHILDREN locks. We handle failure ok, and we should
151 * probably be safe for the most part, but it's still a SWAG.
152 */
153 #define LOCK_NCHILDREN 5
154 #define LOCK_CHILDCOUNT 2048
155
156 #define MAX_W_NAME 64
157
158 #define FULLGRAPH_SBUF_SIZE 512
159
160 /*
161 * These flags go in the witness relationship matrix and describe the
162 * relationship between any two struct witness objects.
163 */
164 #define WITNESS_UNRELATED 0x00 /* No lock order relation. */
165 #define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */
166 #define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */
167 #define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */
168 #define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */
169 #define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR)
170 #define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT)
171 #define WITNESS_RELATED_MASK \
172 (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
173 #define WITNESS_REVERSAL 0x10 /* A lock order reversal has been
174 * observed. */
175 #define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
176 #define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
177 #define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
178
179 /* Descendant to ancestor flags */
180 #define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2)
181
182 /* Ancestor to descendant flags */
183 #define WITNESS_ATOD(x) (((x) & WITNESS_RELATED_MASK) << 2)
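
/*
 * Worked example of the two shifts above (illustrative only, assuming the
 * usual convention that w_rmatrix[A][B] describes A's role relative to B):
 * if w_rmatrix[A][B] contains WITNESS_PARENT (0x01), then
 * WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD (0x04), the flag expected in
 * the mirror entry w_rmatrix[B][A]; conversely, WITNESS_DTOA(WITNESS_CHILD)
 * recovers WITNESS_PARENT.
 */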
184
185 #define WITNESS_INDEX_ASSERT(i) \
186 MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
187
188 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
189
190 /*
191  * Lock instances.  A lock instance is the data that witness associates with
192  * a lock while that lock is held.  For example, a lock instance holds the
193  * recursion count of a lock.  Lock instances are kept in lists: spin locks
194  * are held in a per-CPU list while sleep locks are held in a per-thread list.
195 */
196 struct lock_instance {
197 struct lock_object *li_lock;
198 const char *li_file;
199 int li_line;
200 u_int li_flags;
201 };
202
203 /*
204 * A simple list type used to build the list of locks held by a thread
205 * or CPU. We can't simply embed the list in struct lock_object since a
206 * lock may be held by more than one thread if it is a shared lock. Locks
207 * are added to the head of the list, so we fill up each list entry from
208 * "the back" logically. To ease some of the arithmetic, we actually fill
209 * in each list entry the normal way (children[0] then children[1], etc.) but
210 * when we traverse the list we read children[count-1] as the first entry
211 * down to children[0] as the final entry.
212 */
213 struct lock_list_entry {
214 struct lock_list_entry *ll_next;
215 struct lock_instance ll_children[LOCK_NCHILDREN];
216 u_int ll_count;
217 };
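
/*
 * Example of the traversal convention described above: with ll_count == 3,
 * the most recently acquired lock lives in ll_children[2] and is the first
 * instance visited, while ll_children[0] is visited last.
 */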
218
219 /*
220 * The main witness structure. One of these per named lock type in the system
221 * (for example, "vnode interlock").
222 */
223 struct witness {
224 char w_name[MAX_W_NAME];
225 uint32_t w_index; /* Index in the relationship matrix */
226 struct lock_class *w_class;
227 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
228 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
229 struct witness *w_hash_next; /* Linked list in hash buckets. */
230 const char *w_file; /* File where last acquired */
231 uint32_t w_line; /* Line where last acquired */
232 uint32_t w_refcount;
233 uint16_t w_num_ancestors; /* direct/indirect
234 * ancestor count */
235 uint16_t w_num_descendants; /* direct/indirect
236 * descendant count */
237 int16_t w_ddb_level;
238 unsigned w_displayed:1;
239 unsigned w_reversed:1;
240 };
241
242 STAILQ_HEAD(witness_list, witness);
243
244 /*
245 * The witness hash table. Keys are witness names (const char *), elements are
246 * witness objects (struct witness *).
247 */
248 struct witness_hash {
249 struct witness *wh_array[WITNESS_HASH_SIZE];
250 uint32_t wh_size;
251 uint32_t wh_count;
252 };
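
/*
 * Lookups hash the witness name with witness_hash_djb2() (declared below)
 * and chain any collisions through the w_hash_next pointer of struct
 * witness.  This note is descriptive only; see witness_hash_get() and
 * witness_hash_put() for the authoritative details.
 */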
253
254 /*
255 * Key type for the lock order data hash table.
256 */
257 struct witness_lock_order_key {
258 uint16_t from;
259 uint16_t to;
260 };
261
262 struct witness_lock_order_data {
263 struct stack wlod_stack;
264 struct witness_lock_order_key wlod_key;
265 struct witness_lock_order_data *wlod_next;
266 };
267
268 /*
269 * The witness lock order data hash table. Keys are witness index tuples
270 * (struct witness_lock_order_key), elements are lock order data objects
271 * (struct witness_lock_order_data).
272 */
273 struct witness_lock_order_hash {
274 struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE];
275 u_int wloh_size;
276 u_int wloh_count;
277 };
278
279 #ifdef BLESSING
280 struct witness_blessed {
281 const char *b_lock1;
282 const char *b_lock2;
283 };
284 #endif
285
286 struct witness_pendhelp {
287 const char *wh_type;
288 struct lock_object *wh_lock;
289 };
290
291 struct witness_order_list_entry {
292 const char *w_name;
293 struct lock_class *w_class;
294 };
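
/*
 * Each NULL-terminated run of entries in the order_lists[] table below
 * describes one ordering chain: witness_initialize() enrolls every named
 * lock in the run and records it as a child of the entry before it, so
 * locks named earlier in a run must be acquired before those named later.
 */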
295
296 /*
297 * Returns 0 if one of the locks is a spin lock and the other is not.
298 * Returns 1 otherwise.
299 */
300 static __inline int
301 witness_lock_type_equal(struct witness *w1, struct witness *w2)
302 {
303
304 return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
305 (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
306 }
307
308 static __inline int
309 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
310 const struct witness_lock_order_key *b)
311 {
312
313 return (a->from == b->from && a->to == b->to);
314 }
315
316 static int _isitmyx(struct witness *w1, struct witness *w2, int rmask,
317 const char *fname);
318 static void adopt(struct witness *parent, struct witness *child);
319 #ifdef BLESSING
320 static int blessed(struct witness *, struct witness *);
321 #endif
322 static void depart(struct witness *w);
323 static struct witness *enroll(const char *description,
324 struct lock_class *lock_class);
325 static struct lock_instance *find_instance(struct lock_list_entry *list,
326 const struct lock_object *lock);
327 static int isitmychild(struct witness *parent, struct witness *child);
328 static int isitmydescendant(struct witness *parent, struct witness *child);
329 static void itismychild(struct witness *parent, struct witness *child);
330 static int sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
331 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
332 static int sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
333 static int sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
334 static void witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
335 #ifdef DDB
336 static void witness_ddb_compute_levels(void);
337 static void witness_ddb_display(int(*)(const char *fmt, ...));
338 static void witness_ddb_display_descendants(int(*)(const char *fmt, ...),
339 struct witness *, int indent);
340 static void witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
341 struct witness_list *list);
342 static void witness_ddb_level_descendants(struct witness *parent, int l);
343 static void witness_ddb_list(struct thread *td);
344 #endif
345 static void witness_debugger(int cond, const char *msg);
346 static void witness_free(struct witness *m);
347 static struct witness *witness_get(void);
348 static uint32_t witness_hash_djb2(const uint8_t *key, uint32_t size);
349 static struct witness *witness_hash_get(const char *key);
350 static void witness_hash_put(struct witness *w);
351 static void witness_init_hash_tables(void);
352 static void witness_increment_graph_generation(void);
353 static void witness_lock_list_free(struct lock_list_entry *lle);
354 static struct lock_list_entry *witness_lock_list_get(void);
355 static int witness_lock_order_add(struct witness *parent,
356 struct witness *child);
357 static int witness_lock_order_check(struct witness *parent,
358 struct witness *child);
359 static struct witness_lock_order_data *witness_lock_order_get(
360 struct witness *parent,
361 struct witness *child);
362 static void witness_list_lock(struct lock_instance *instance,
363 int (*prnt)(const char *fmt, ...));
364 static int witness_output(const char *fmt, ...) __printflike(1, 2);
365 static int witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
366 static void witness_setflag(struct lock_object *lock, int flag, int set);
367
368 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
369 "Witness Locking");
370
371 /*
372 * If set to 0, lock order checking is disabled. If set to -1,
373 * witness is completely disabled. Otherwise witness performs full
374 * lock order checking for all locks. At runtime, lock order checking
375 * may be toggled. However, witness cannot be reenabled once it is
376 * completely disabled.
377 */
378 static int witness_watch = 1;
379 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0,
380 sysctl_debug_witness_watch, "I", "witness is watching lock operations");
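
/*
 * Example usage (a sketch, following the description above): the knob can
 * be changed at runtime with sysctl(8), e.g.
 *	sysctl debug.witness.watch=0	# disable lock order checking
 *	sysctl debug.witness.watch=-1	# disable witness entirely (one way)
 * or preset as a loader tunable (CTLFLAG_RWTUN), e.g. in loader.conf(5):
 *	debug.witness.watch="-1"
 */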
381
382 #ifdef KDB
383 /*
384 * When KDB is enabled and witness_kdb is 1, it will cause the system
385 * to drop into kdebug() when:
386 * - a lock hierarchy violation occurs
387 * - locks are held when going to sleep.
388 */
389 #ifdef WITNESS_KDB
390 int witness_kdb = 1;
391 #else
392 int witness_kdb = 0;
393 #endif
394 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
395 #endif /* KDB */
396
397 #if defined(DDB) || defined(KDB)
398 /*
399  * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
400  * to print a stack trace when:
401 * - a lock hierarchy violation occurs
402 * - locks are held when going to sleep.
403 */
404 int witness_trace = 1;
405 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
406 #endif /* DDB || KDB */
407
408 #ifdef WITNESS_SKIPSPIN
409 int witness_skipspin = 1;
410 #else
411 int witness_skipspin = 0;
412 #endif
413 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
414
415 int badstack_sbuf_size;
416
417 int witness_count = WITNESS_COUNT;
418 SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
419 &witness_count, 0, "");
420
421 /*
422 * Output channel for witness messages. By default we print to the console.
423 */
424 enum witness_channel {
425 WITNESS_CONSOLE,
426 WITNESS_LOG,
427 WITNESS_NONE,
428 };
429
430 static enum witness_channel witness_channel = WITNESS_CONSOLE;
431 SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel, CTLTYPE_STRING |
432 CTLFLAG_RWTUN, NULL, 0, sysctl_debug_witness_channel, "A",
433 "Output channel for warnings");
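
/*
 * Example usage (a sketch; the accepted keyword strings are parsed by
 * sysctl_debug_witness_channel(), defined later in this file, and are
 * assumed here to mirror the enum above):
 *	sysctl debug.witness.output_channel=log
 * would redirect witness warnings from the console to the kernel log.
 */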
434
435 /*
436 * Call this to print out the relations between locks.
437 */
438 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
439 NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
440
441 /*
442  * Call this to print out the stack traces recorded for witness lock-order
442  * reversals.
443 */
444 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
445 NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
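
/*
 * Example usage (a sketch): both nodes are read-only string sysctls, so
 * they can be dumped from userland with e.g.
 *	sysctl debug.witness.fullgraph
 *	sysctl debug.witness.badstacks
 */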
446
447 static struct mtx w_mtx;
448
449 /* w_list */
450 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
451 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
452
453 /* w_typelist */
454 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
455 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
456
457 /* lock list */
458 static struct lock_list_entry *w_lock_list_free = NULL;
459 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
460 static u_int pending_cnt;
461
462 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
463 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
464 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
465 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
466 "");
467
468 static struct witness *w_data;
469 static uint8_t **w_rmatrix;
470 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
471 static struct witness_hash w_hash; /* The witness hash table. */
472
473 /* The lock order data hash */
474 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
475 static struct witness_lock_order_data *w_lofree = NULL;
476 static struct witness_lock_order_hash w_lohash;
477 static int w_max_used_index = 0;
478 static unsigned int w_generation = 0;
479 static const char w_notrunning[] = "Witness not running\n";
480 static const char w_stillcold[] = "Witness is still cold\n";
481
482
483 static struct witness_order_list_entry order_lists[] = {
484 /*
485 * sx locks
486 */
487 { "proctree", &lock_class_sx },
488 { "allproc", &lock_class_sx },
489 { "allprison", &lock_class_sx },
490 { NULL, NULL },
491 /*
492 * Various mutexes
493 */
494 { "Giant", &lock_class_mtx_sleep },
495 { "pipe mutex", &lock_class_mtx_sleep },
496 { "sigio lock", &lock_class_mtx_sleep },
497 { "process group", &lock_class_mtx_sleep },
498 { "process lock", &lock_class_mtx_sleep },
499 { "session", &lock_class_mtx_sleep },
500 { "uidinfo hash", &lock_class_rw },
501 #ifdef HWPMC_HOOKS
502 { "pmc-sleep", &lock_class_mtx_sleep },
503 #endif
504 { "time lock", &lock_class_mtx_sleep },
505 { NULL, NULL },
506 /*
507 * umtx
508 */
509 { "umtx lock", &lock_class_mtx_sleep },
510 { NULL, NULL },
511 /*
512 * Sockets
513 */
514 { "accept", &lock_class_mtx_sleep },
515 { "so_snd", &lock_class_mtx_sleep },
516 { "so_rcv", &lock_class_mtx_sleep },
517 { "sellck", &lock_class_mtx_sleep },
518 { NULL, NULL },
519 /*
520 * Routing
521 */
522 { "so_rcv", &lock_class_mtx_sleep },
523 { "radix node head", &lock_class_rw },
524 { "rtentry", &lock_class_mtx_sleep },
525 { "ifaddr", &lock_class_mtx_sleep },
526 { NULL, NULL },
527 /*
528 * IPv4 multicast:
529 * protocol locks before interface locks, after UDP locks.
530 */
531 { "udpinp", &lock_class_rw },
532 { "in_multi_mtx", &lock_class_mtx_sleep },
533 { "igmp_mtx", &lock_class_mtx_sleep },
534 { "if_addr_lock", &lock_class_rw },
535 { NULL, NULL },
536 /*
537 * IPv6 multicast:
538 * protocol locks before interface locks, after UDP locks.
539 */
540 { "udpinp", &lock_class_rw },
541 { "in6_multi_mtx", &lock_class_mtx_sleep },
542 { "mld_mtx", &lock_class_mtx_sleep },
543 { "if_addr_lock", &lock_class_rw },
544 { NULL, NULL },
545 /*
546 * UNIX Domain Sockets
547 */
548 { "unp_link_rwlock", &lock_class_rw },
549 { "unp_list_lock", &lock_class_mtx_sleep },
550 { "unp", &lock_class_mtx_sleep },
551 { "so_snd", &lock_class_mtx_sleep },
552 { NULL, NULL },
553 /*
554 * UDP/IP
555 */
556 { "udp", &lock_class_rw },
557 { "udpinp", &lock_class_rw },
558 { "so_snd", &lock_class_mtx_sleep },
559 { NULL, NULL },
560 /*
561 * TCP/IP
562 */
563 { "tcp", &lock_class_rw },
564 { "tcpinp", &lock_class_rw },
565 { "so_snd", &lock_class_mtx_sleep },
566 { NULL, NULL },
567 /*
568 * BPF
569 */
570 { "bpf global lock", &lock_class_mtx_sleep },
571 { "bpf interface lock", &lock_class_rw },
572 { "bpf cdev lock", &lock_class_mtx_sleep },
573 { NULL, NULL },
574 /*
575 * NFS server
576 */
577 { "nfsd_mtx", &lock_class_mtx_sleep },
578 { "so_snd", &lock_class_mtx_sleep },
579 { NULL, NULL },
580
581 /*
582 * IEEE 802.11
583 */
584 { "802.11 com lock", &lock_class_mtx_sleep},
585 { NULL, NULL },
586 /*
587 * Network drivers
588 */
589 { "network driver", &lock_class_mtx_sleep},
590 { NULL, NULL },
591
592 /*
593 * Netgraph
594 */
595 { "ng_node", &lock_class_mtx_sleep },
596 { "ng_worklist", &lock_class_mtx_sleep },
597 { NULL, NULL },
598 /*
599 * CDEV
600 */
601 { "vm map (system)", &lock_class_mtx_sleep },
602 { "vm pagequeue", &lock_class_mtx_sleep },
603 { "vnode interlock", &lock_class_mtx_sleep },
604 { "cdev", &lock_class_mtx_sleep },
605 { NULL, NULL },
606 /*
607 * VM
608 */
609 { "vm map (user)", &lock_class_sx },
610 { "vm object", &lock_class_rw },
611 { "vm page", &lock_class_mtx_sleep },
612 { "vm pagequeue", &lock_class_mtx_sleep },
613 { "pmap pv global", &lock_class_rw },
614 { "pmap", &lock_class_mtx_sleep },
615 { "pmap pv list", &lock_class_rw },
616 { "vm page free queue", &lock_class_mtx_sleep },
617 { NULL, NULL },
618 /*
619 * kqueue/VFS interaction
620 */
621 { "kqueue", &lock_class_mtx_sleep },
622 { "struct mount mtx", &lock_class_mtx_sleep },
623 { "vnode interlock", &lock_class_mtx_sleep },
624 { NULL, NULL },
625 /*
626 * VFS namecache
627 */
628 { "ncvn", &lock_class_mtx_sleep },
629 { "ncbuc", &lock_class_rw },
630 { "vnode interlock", &lock_class_mtx_sleep },
631 { "ncneg", &lock_class_mtx_sleep },
632 { NULL, NULL },
633 /*
634 * ZFS locking
635 */
636 { "dn->dn_mtx", &lock_class_sx },
637 { "dr->dt.di.dr_mtx", &lock_class_sx },
638 { "db->db_mtx", &lock_class_sx },
639 { NULL, NULL },
640 /*
641 * spin locks
642 */
643 #ifdef SMP
644 { "ap boot", &lock_class_mtx_spin },
645 #endif
646 { "rm.mutex_mtx", &lock_class_mtx_spin },
647 { "sio", &lock_class_mtx_spin },
648 { "scrlock", &lock_class_mtx_spin },
649 #ifdef __i386__
650 { "cy", &lock_class_mtx_spin },
651 #endif
652 #ifdef __sparc64__
653 { "pcib_mtx", &lock_class_mtx_spin },
654 { "rtc_mtx", &lock_class_mtx_spin },
655 #endif
656 { "scc_hwmtx", &lock_class_mtx_spin },
657 { "uart_hwmtx", &lock_class_mtx_spin },
658 { "fast_taskqueue", &lock_class_mtx_spin },
659 { "intr table", &lock_class_mtx_spin },
660 #ifdef HWPMC_HOOKS
661 { "pmc-per-proc", &lock_class_mtx_spin },
662 #endif
663 { "process slock", &lock_class_mtx_spin },
664 { "sleepq chain", &lock_class_mtx_spin },
665 { "rm_spinlock", &lock_class_mtx_spin },
666 { "turnstile chain", &lock_class_mtx_spin },
667 { "turnstile lock", &lock_class_mtx_spin },
668 { "sched lock", &lock_class_mtx_spin },
669 { "td_contested", &lock_class_mtx_spin },
670 { "callout", &lock_class_mtx_spin },
671 { "entropy harvest mutex", &lock_class_mtx_spin },
672 { "syscons video lock", &lock_class_mtx_spin },
673 #ifdef SMP
674 { "smp rendezvous", &lock_class_mtx_spin },
675 #endif
676 #ifdef __powerpc__
677 { "tlb0", &lock_class_mtx_spin },
678 #endif
679 /*
680 * leaf locks
681 */
682 { "intrcnt", &lock_class_mtx_spin },
683 { "icu", &lock_class_mtx_spin },
684 #if defined(SMP) && defined(__sparc64__)
685 { "ipi", &lock_class_mtx_spin },
686 #endif
687 #ifdef __i386__
688 { "allpmaps", &lock_class_mtx_spin },
689 { "descriptor tables", &lock_class_mtx_spin },
690 #endif
691 { "clk", &lock_class_mtx_spin },
692 { "cpuset", &lock_class_mtx_spin },
693 { "mprof lock", &lock_class_mtx_spin },
694 { "zombie lock", &lock_class_mtx_spin },
695 { "ALD Queue", &lock_class_mtx_spin },
696 #if defined(__i386__) || defined(__amd64__)
697 { "pcicfg", &lock_class_mtx_spin },
698 { "NDIS thread lock", &lock_class_mtx_spin },
699 #endif
700 { "tw_osl_io_lock", &lock_class_mtx_spin },
701 { "tw_osl_q_lock", &lock_class_mtx_spin },
702 { "tw_cl_io_lock", &lock_class_mtx_spin },
703 { "tw_cl_intr_lock", &lock_class_mtx_spin },
704 { "tw_cl_gen_lock", &lock_class_mtx_spin },
705 #ifdef HWPMC_HOOKS
706 { "pmc-leaf", &lock_class_mtx_spin },
707 #endif
708 { "blocked lock", &lock_class_mtx_spin },
709 { NULL, NULL },
710 { NULL, NULL }
711 };
712
713 #ifdef BLESSING
714 /*
715 * Pairs of locks which have been blessed
716 * Don't complain about order problems with blessed locks
717 */
718 static struct witness_blessed blessed_list[] = {
719 };
720 #endif
721
722 /*
723 * This global is set to 0 once it becomes safe to use the witness code.
724 */
725 static int witness_cold = 1;
726
727 /*
728 * This global is set to 1 once the static lock orders have been enrolled
729 * so that a warning can be issued for any spin locks enrolled later.
730 */
731 static int witness_spin_warn = 0;
732
733 /* Trim useless garbage from filenames. */
734 static const char *
735 fixup_filename(const char *file)
736 {
737
738 if (file == NULL)
739 return (NULL);
740 while (strncmp(file, "../", 3) == 0)
741 file += 3;
742 return (file);
743 }
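
/*
 * For example (illustrative path only), a file recorded as
 * "../../../kern/kern_synch.c" is reported as "kern/kern_synch.c".
 */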
744
745 /*
746  * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
747  * that early boot is single-threaded, at least until after this routine has
748  * completed.
749 */
750 static void
751 witness_initialize(void *dummy __unused)
752 {
753 struct lock_object *lock;
754 struct witness_order_list_entry *order;
755 struct witness *w, *w1;
756 int i;
757
758 w_data = malloc(sizeof (struct witness) * witness_count, M_WITNESS,
759 M_WAITOK | M_ZERO);
760
761 w_rmatrix = malloc(sizeof(*w_rmatrix) * (witness_count + 1),
762 M_WITNESS, M_WAITOK | M_ZERO);
763
764 for (i = 0; i < witness_count + 1; i++) {
765 w_rmatrix[i] = malloc(sizeof(*w_rmatrix[i]) *
766 (witness_count + 1), M_WITNESS, M_WAITOK | M_ZERO);
767 }
768 badstack_sbuf_size = witness_count * 256;
769
770 /*
771 * We have to release Giant before initializing its witness
772 * structure so that WITNESS doesn't get confused.
773 */
774 mtx_unlock(&Giant);
775 mtx_assert(&Giant, MA_NOTOWNED);
776
777 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
778 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
779 MTX_NOWITNESS | MTX_NOPROFILE);
780 for (i = witness_count - 1; i >= 0; i--) {
781 w = &w_data[i];
782 memset(w, 0, sizeof(*w));
783 w_data[i].w_index = i; /* Witness index never changes. */
784 witness_free(w);
785 }
786 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
787 ("%s: Invalid list of free witness objects", __func__));
788
789 	/* The witness at index 0 is intentionally left unused, to aid debugging. */
790 STAILQ_REMOVE_HEAD(&w_free, w_list);
791 w_free_cnt--;
792
793 for (i = 0; i < witness_count; i++) {
794 memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
795 (witness_count + 1));
796 }
797
798 for (i = 0; i < LOCK_CHILDCOUNT; i++)
799 witness_lock_list_free(&w_locklistdata[i]);
800 witness_init_hash_tables();
801
802 /* First add in all the specified order lists. */
803 for (order = order_lists; order->w_name != NULL; order++) {
804 w = enroll(order->w_name, order->w_class);
805 if (w == NULL)
806 continue;
807 w->w_file = "order list";
808 for (order++; order->w_name != NULL; order++) {
809 w1 = enroll(order->w_name, order->w_class);
810 if (w1 == NULL)
811 continue;
812 w1->w_file = "order list";
813 itismychild(w, w1);
814 w = w1;
815 }
816 }
817 witness_spin_warn = 1;
818
819 /* Iterate through all locks and add them to witness. */
820 for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
821 lock = pending_locks[i].wh_lock;
822 KASSERT(lock->lo_flags & LO_WITNESS,
823 ("%s: lock %s is on pending list but not LO_WITNESS",
824 __func__, lock->lo_name));
825 lock->lo_witness = enroll(pending_locks[i].wh_type,
826 LOCK_CLASS(lock));
827 }
828
829 /* Mark the witness code as being ready for use. */
830 witness_cold = 0;
831
832 mtx_lock(&Giant);
833 }
834 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
835 NULL);
836
837 void
838 witness_init(struct lock_object *lock, const char *type)
839 {
840 struct lock_class *class;
841
842 /* Various sanity checks. */
843 class = LOCK_CLASS(lock);
844 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
845 (class->lc_flags & LC_RECURSABLE) == 0)
846 kassert_panic("%s: lock (%s) %s can not be recursable",
847 __func__, class->lc_name, lock->lo_name);
848 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
849 (class->lc_flags & LC_SLEEPABLE) == 0)
850 kassert_panic("%s: lock (%s) %s can not be sleepable",
851 __func__, class->lc_name, lock->lo_name);
852 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
853 (class->lc_flags & LC_UPGRADABLE) == 0)
854 kassert_panic("%s: lock (%s) %s can not be upgradable",
855 __func__, class->lc_name, lock->lo_name);
856
857 /*
858 * If we shouldn't watch this lock, then just clear lo_witness.
859 * Otherwise, if witness_cold is set, then it is too early to
860 * enroll this lock, so defer it to witness_initialize() by adding
861 * it to the pending_locks list. If it is not too early, then enroll
862 * the lock now.
863 */
864 if (witness_watch < 1 || panicstr != NULL ||
865 (lock->lo_flags & LO_WITNESS) == 0)
866 lock->lo_witness = NULL;
867 else if (witness_cold) {
868 pending_locks[pending_cnt].wh_lock = lock;
869 pending_locks[pending_cnt++].wh_type = type;
870 if (pending_cnt > WITNESS_PENDLIST)
871 panic("%s: pending locks list is too small, "
872 "increase WITNESS_PENDLIST\n",
873 __func__);
874 } else
875 lock->lo_witness = enroll(type, class);
876 }
877
878 void
879 witness_destroy(struct lock_object *lock)
880 {
881 struct lock_class *class;
882 struct witness *w;
883
884 class = LOCK_CLASS(lock);
885
886 if (witness_cold)
887 panic("lock (%s) %s destroyed while witness_cold",
888 class->lc_name, lock->lo_name);
889
890 /* XXX: need to verify that no one holds the lock */
891 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
892 return;
893 w = lock->lo_witness;
894
895 mtx_lock_spin(&w_mtx);
896 MPASS(w->w_refcount > 0);
897 w->w_refcount--;
898
899 if (w->w_refcount == 0)
900 depart(w);
901 mtx_unlock_spin(&w_mtx);
902 }
903
904 #ifdef DDB
905 static void
906 witness_ddb_compute_levels(void)
907 {
908 struct witness *w;
909
910 /*
911 * First clear all levels.
912 */
913 STAILQ_FOREACH(w, &w_all, w_list)
914 w->w_ddb_level = -1;
915
916 /*
917 * Look for locks with no parents and level all their descendants.
918 */
919 STAILQ_FOREACH(w, &w_all, w_list) {
920
921 /* If the witness has ancestors (is not a root), skip it. */
922 if (w->w_num_ancestors > 0)
923 continue;
924 witness_ddb_level_descendants(w, 0);
925 }
926 }
927
928 static void
929 witness_ddb_level_descendants(struct witness *w, int l)
930 {
931 int i;
932
933 if (w->w_ddb_level >= l)
934 return;
935
936 w->w_ddb_level = l;
937 l++;
938
939 for (i = 1; i <= w_max_used_index; i++) {
940 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
941 witness_ddb_level_descendants(&w_data[i], l);
942 }
943 }
944
945 static void
946 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
947 struct witness *w, int indent)
948 {
949 int i;
950
951 for (i = 0; i < indent; i++)
952 prnt(" ");
953 prnt("%s (type: %s, depth: %d, active refs: %d)",
954 w->w_name, w->w_class->lc_name,
955 w->w_ddb_level, w->w_refcount);
956 if (w->w_displayed) {
957 prnt(" -- (already displayed)\n");
958 return;
959 }
960 w->w_displayed = 1;
961 if (w->w_file != NULL && w->w_line != 0)
962 prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
963 w->w_line);
964 else
965 prnt(" -- never acquired\n");
966 indent++;
967 WITNESS_INDEX_ASSERT(w->w_index);
968 for (i = 1; i <= w_max_used_index; i++) {
969 if (db_pager_quit)
970 return;
971 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
972 witness_ddb_display_descendants(prnt, &w_data[i],
973 indent);
974 }
975 }
976
977 static void
978 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
979 struct witness_list *list)
980 {
981 struct witness *w;
982
983 STAILQ_FOREACH(w, list, w_typelist) {
984 if (w->w_file == NULL || w->w_ddb_level > 0)
985 continue;
986
987 		/* This lock has no ancestors - display its descendants. */
988 witness_ddb_display_descendants(prnt, w, 0);
989 if (db_pager_quit)
990 return;
991 }
992 }
993
994 static void
995 witness_ddb_display(int(*prnt)(const char *fmt, ...))
996 {
997 struct witness *w;
998
999 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1000 witness_ddb_compute_levels();
1001
1002 /* Clear all the displayed flags. */
1003 STAILQ_FOREACH(w, &w_all, w_list)
1004 w->w_displayed = 0;
1005
1006 /*
1007 * First, handle sleep locks which have been acquired at least
1008 * once.
1009 */
1010 prnt("Sleep locks:\n");
1011 witness_ddb_display_list(prnt, &w_sleep);
1012 if (db_pager_quit)
1013 return;
1014
1015 /*
1016 * Now do spin locks which have been acquired at least once.
1017 */
1018 prnt("\nSpin locks:\n");
1019 witness_ddb_display_list(prnt, &w_spin);
1020 if (db_pager_quit)
1021 return;
1022
1023 /*
1024 * Finally, any locks which have not been acquired yet.
1025 */
1026 prnt("\nLocks which were never acquired:\n");
1027 STAILQ_FOREACH(w, &w_all, w_list) {
1028 if (w->w_file != NULL || w->w_refcount == 0)
1029 continue;
1030 prnt("%s (type: %s, depth: %d)\n", w->w_name,
1031 w->w_class->lc_name, w->w_ddb_level);
1032 if (db_pager_quit)
1033 return;
1034 }
1035 }
1036 #endif /* DDB */
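
/*
 * The DDB helpers above back the debugger's witness-related "show" commands
 * (e.g. "show witness" for the lock order graph and "show locks" for the
 * locks held by a thread).  This note is descriptive only; see the
 * DB_SHOW_COMMAND definitions later in this file for the exact names.
 */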
1037
1038 int
1039 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1040 {
1041
1042 if (witness_watch == -1 || panicstr != NULL)
1043 return (0);
1044
1045 /* Require locks that witness knows about. */
1046 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1047 lock2->lo_witness == NULL)
1048 return (EINVAL);
1049
1050 mtx_assert(&w_mtx, MA_NOTOWNED);
1051 mtx_lock_spin(&w_mtx);
1052
1053 /*
1054 * If we already have either an explicit or implied lock order that
1055 * is the other way around, then return an error.
1056 */
1057 if (witness_watch &&
1058 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1059 mtx_unlock_spin(&w_mtx);
1060 return (EDOOFUS);
1061 }
1062
1063 /* Try to add the new order. */
1064 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1065 lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1066 itismychild(lock1->lo_witness, lock2->lo_witness);
1067 mtx_unlock_spin(&w_mtx);
1068 return (0);
1069 }
1070
1071 void
1072 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1073 int line, struct lock_object *interlock)
1074 {
1075 struct lock_list_entry *lock_list, *lle;
1076 struct lock_instance *lock1, *lock2, *plock;
1077 struct lock_class *class, *iclass;
1078 struct witness *w, *w1;
1079 struct thread *td;
1080 int i, j;
1081
1082 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1083 panicstr != NULL)
1084 return;
1085
1086 w = lock->lo_witness;
1087 class = LOCK_CLASS(lock);
1088 td = curthread;
1089
1090 if (class->lc_flags & LC_SLEEPLOCK) {
1091
1092 /*
1093 * Since spin locks include a critical section, this check
1094 * implicitly enforces a lock order of all sleep locks before
1095 * all spin locks.
1096 */
1097 if (td->td_critnest != 0 && !kdb_active)
1098 kassert_panic("acquiring blockable sleep lock with "
1099 "spinlock or critical section held (%s) %s @ %s:%d",
1100 class->lc_name, lock->lo_name,
1101 fixup_filename(file), line);
1102
1103 /*
1104 * If this is the first lock acquired then just return as
1105 * no order checking is needed.
1106 */
1107 lock_list = td->td_sleeplocks;
1108 if (lock_list == NULL || lock_list->ll_count == 0)
1109 return;
1110 } else {
1111
1112 /*
1113 		 * If this is the first lock, just return as no order
1114 		 * checking is needed.  To avoid problems with thread
1115 		 * migration, pin the thread while checking whether any
1116 		 * spinlocks are held.  If at least one spinlock is held,
1117 		 * the thread cannot migrate, so it is safe to unpin it
1118 		 * again.
1119 */
1120 sched_pin();
1121 lock_list = PCPU_GET(spinlocks);
1122 if (lock_list == NULL || lock_list->ll_count == 0) {
1123 sched_unpin();
1124 return;
1125 }
1126 sched_unpin();
1127 }
1128
1129 /*
1130 * Check to see if we are recursing on a lock we already own. If
1131 * so, make sure that we don't mismatch exclusive and shared lock
1132 * acquires.
1133 */
1134 lock1 = find_instance(lock_list, lock);
1135 if (lock1 != NULL) {
1136 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1137 (flags & LOP_EXCLUSIVE) == 0) {
1138 witness_output("shared lock of (%s) %s @ %s:%d\n",
1139 class->lc_name, lock->lo_name,
1140 fixup_filename(file), line);
1141 witness_output("while exclusively locked from %s:%d\n",
1142 fixup_filename(lock1->li_file), lock1->li_line);
1143 kassert_panic("excl->share");
1144 }
1145 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1146 (flags & LOP_EXCLUSIVE) != 0) {
1147 witness_output("exclusive lock of (%s) %s @ %s:%d\n",
1148 class->lc_name, lock->lo_name,
1149 fixup_filename(file), line);
1150 witness_output("while share locked from %s:%d\n",
1151 fixup_filename(lock1->li_file), lock1->li_line);
1152 kassert_panic("share->excl");
1153 }
1154 return;
1155 }
1156
1157 /* Warn if the interlock is not locked exactly once. */
1158 if (interlock != NULL) {
1159 iclass = LOCK_CLASS(interlock);
1160 lock1 = find_instance(lock_list, interlock);
1161 if (lock1 == NULL)
1162 kassert_panic("interlock (%s) %s not locked @ %s:%d",
1163 iclass->lc_name, interlock->lo_name,
1164 fixup_filename(file), line);
1165 else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1166 kassert_panic("interlock (%s) %s recursed @ %s:%d",
1167 iclass->lc_name, interlock->lo_name,
1168 fixup_filename(file), line);
1169 }
1170
1171 /*
1172 * Find the previously acquired lock, but ignore interlocks.
1173 */
1174 plock = &lock_list->ll_children[lock_list->ll_count - 1];
1175 if (interlock != NULL && plock->li_lock == interlock) {
1176 if (lock_list->ll_count > 1)
1177 plock =
1178 &lock_list->ll_children[lock_list->ll_count - 2];
1179 else {
1180 lle = lock_list->ll_next;
1181
1182 /*
1183 * The interlock is the only lock we hold, so
1184 * simply return.
1185 */
1186 if (lle == NULL)
1187 return;
1188 plock = &lle->ll_children[lle->ll_count - 1];
1189 }
1190 }
1191
1192 /*
1193 * Try to perform most checks without a lock. If this succeeds we
1194 * can skip acquiring the lock and return success. Otherwise we redo
1195 * the check with the lock held to handle races with concurrent updates.
1196 */
1197 w1 = plock->li_lock->lo_witness;
1198 if (witness_lock_order_check(w1, w))
1199 return;
1200
1201 mtx_lock_spin(&w_mtx);
1202 if (witness_lock_order_check(w1, w)) {
1203 mtx_unlock_spin(&w_mtx);
1204 return;
1205 }
1206 witness_lock_order_add(w1, w);
1207
1208 /*
1209 * Check for duplicate locks of the same type. Note that we only
1210 * have to check for this on the last lock we just acquired. Any
1211 * other cases will be caught as lock order violations.
1212 */
1213 if (w1 == w) {
1214 i = w->w_index;
1215 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1216 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1217 w_rmatrix[i][i] |= WITNESS_REVERSAL;
1218 w->w_reversed = 1;
1219 mtx_unlock_spin(&w_mtx);
1220 witness_output(
1221 "acquiring duplicate lock of same type: \"%s\"\n",
1222 w->w_name);
1223 witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1224 fixup_filename(plock->li_file), plock->li_line);
1225 witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
1226 fixup_filename(file), line);
1227 witness_debugger(1, __func__);
1228 } else
1229 mtx_unlock_spin(&w_mtx);
1230 return;
1231 }
1232 mtx_assert(&w_mtx, MA_OWNED);
1233
1234 /*
1235 * If we know that the lock we are acquiring comes after
1236 * the lock we most recently acquired in the lock order tree,
1237 * then there is no need for any further checks.
1238 */
1239 if (isitmychild(w1, w))
1240 goto out;
1241
1242 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1243 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1244
1245 MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
1246 lock1 = &lle->ll_children[i];
1247
1248 /*
1249 * Ignore the interlock.
1250 */
1251 if (interlock == lock1->li_lock)
1252 continue;
1253
1254 /*
1255 * If this lock doesn't undergo witness checking,
1256 * then skip it.
1257 */
1258 w1 = lock1->li_lock->lo_witness;
1259 if (w1 == NULL) {
1260 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1261 ("lock missing witness structure"));
1262 continue;
1263 }
1264
1265 /*
1266 * If we are locking Giant and this is a sleepable
1267 * lock, then skip it.
1268 */
1269 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1270 lock == &Giant.lock_object)
1271 continue;
1272
1273 /*
1274 * If we are locking a sleepable lock and this lock
1275 * is Giant, then skip it.
1276 */
1277 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1278 lock1->li_lock == &Giant.lock_object)
1279 continue;
1280
1281 /*
1282 * If we are locking a sleepable lock and this lock
1283 * isn't sleepable, we want to treat it as a lock
1284 			 * order violation to enforce a general lock order of
1285 * sleepable locks before non-sleepable locks.
1286 */
1287 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1288 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1289 goto reversal;
1290
1291 /*
1292 * If we are locking Giant and this is a non-sleepable
1293 * lock, then treat it as a reversal.
1294 */
1295 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1296 lock == &Giant.lock_object)
1297 goto reversal;
1298
1299 /*
1300 			 * Check the lock order hierarchy for a reversal.
1301 */
1302 if (!isitmydescendant(w, w1))
1303 continue;
1304 reversal:
1305
1306 /*
1307 * We have a lock order violation, check to see if it
1308 * is allowed or has already been yelled about.
1309 */
1310 #ifdef BLESSING
1311
1312 /*
1313 * If the lock order is blessed, just bail. We don't
1314 * look for other lock order violations though, which
1315 * may be a bug.
1316 */
1317 if (blessed(w, w1))
1318 goto out;
1319 #endif
1320
1321 /* Bail if this violation is known */
1322 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1323 goto out;
1324
1325 /* Record this as a violation */
1326 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1327 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1328 w->w_reversed = w1->w_reversed = 1;
1329 witness_increment_graph_generation();
1330 mtx_unlock_spin(&w_mtx);
1331
1332 #ifdef WITNESS_NO_VNODE
1333 /*
1334 * There are known LORs between VNODE locks. They are
1335 * not an indication of a bug. VNODE locks are flagged
1336 * as such (LO_IS_VNODE) and we don't yell if the LOR
1337 * is between 2 VNODE locks.
1338 */
1339 if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1340 (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1341 return;
1342 #endif
1343
1344 /*
1345 * Ok, yell about it.
1346 */
1347 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1348 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1349 witness_output(
1350 "lock order reversal: (sleepable after non-sleepable)\n");
1351 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1352 && lock == &Giant.lock_object)
1353 witness_output(
1354 "lock order reversal: (Giant after non-sleepable)\n");
1355 else
1356 witness_output("lock order reversal:\n");
1357
1358 /*
1359 * Try to locate an earlier lock with
1360 * witness w in our list.
1361 */
1362 do {
1363 lock2 = &lle->ll_children[i];
1364 MPASS(lock2->li_lock != NULL);
1365 if (lock2->li_lock->lo_witness == w)
1366 break;
1367 if (i == 0 && lle->ll_next != NULL) {
1368 lle = lle->ll_next;
1369 i = lle->ll_count - 1;
1370 MPASS(i >= 0 && i < LOCK_NCHILDREN);
1371 } else
1372 i--;
1373 } while (i >= 0);
1374 if (i < 0) {
1375 witness_output(" 1st %p %s (%s) @ %s:%d\n",
1376 lock1->li_lock, lock1->li_lock->lo_name,
1377 w1->w_name, fixup_filename(lock1->li_file),
1378 lock1->li_line);
1379 witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock,
1380 lock->lo_name, w->w_name,
1381 fixup_filename(file), line);
1382 } else {
1383 witness_output(" 1st %p %s (%s) @ %s:%d\n",
1384 lock2->li_lock, lock2->li_lock->lo_name,
1385 lock2->li_lock->lo_witness->w_name,
1386 fixup_filename(lock2->li_file),
1387 lock2->li_line);
1388 witness_output(" 2nd %p %s (%s) @ %s:%d\n",
1389 lock1->li_lock, lock1->li_lock->lo_name,
1390 w1->w_name, fixup_filename(lock1->li_file),
1391 lock1->li_line);
1392 witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock,
1393 lock->lo_name, w->w_name,
1394 fixup_filename(file), line);
1395 }
1396 witness_debugger(1, __func__);
1397 return;
1398 }
1399 }
1400
1401 /*
1402 * If requested, build a new lock order. However, don't build a new
1403 * relationship between a sleepable lock and Giant if it is in the
1404 * wrong direction. The correct lock order is that sleepable locks
1405 * always come before Giant.
1406 */
1407 if (flags & LOP_NEWORDER &&
1408 !(plock->li_lock == &Giant.lock_object &&
1409 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1410 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1411 w->w_name, plock->li_lock->lo_witness->w_name);
1412 itismychild(plock->li_lock->lo_witness, w);
1413 }
1414 out:
1415 mtx_unlock_spin(&w_mtx);
1416 }
1417
1418 void
1419 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1420 {
1421 struct lock_list_entry **lock_list, *lle;
1422 struct lock_instance *instance;
1423 struct witness *w;
1424 struct thread *td;
1425
1426 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1427 panicstr != NULL)
1428 return;
1429 w = lock->lo_witness;
1430 td = curthread;
1431
1432 /* Determine lock list for this lock. */
1433 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1434 lock_list = &td->td_sleeplocks;
1435 else
1436 lock_list = PCPU_PTR(spinlocks);
1437
1438 /* Check to see if we are recursing on a lock we already own. */
1439 instance = find_instance(*lock_list, lock);
1440 if (instance != NULL) {
1441 instance->li_flags++;
1442 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1443 td->td_proc->p_pid, lock->lo_name,
1444 instance->li_flags & LI_RECURSEMASK);
1445 instance->li_file = file;
1446 instance->li_line = line;
1447 return;
1448 }
1449
1450 /* Update per-witness last file and line acquire. */
1451 w->w_file = file;
1452 w->w_line = line;
1453
1454 /* Find the next open lock instance in the list and fill it. */
1455 lle = *lock_list;
1456 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1457 lle = witness_lock_list_get();
1458 if (lle == NULL)
1459 return;
1460 lle->ll_next = *lock_list;
1461 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1462 td->td_proc->p_pid, lle);
1463 *lock_list = lle;
1464 }
1465 instance = &lle->ll_children[lle->ll_count++];
1466 instance->li_lock = lock;
1467 instance->li_line = line;
1468 instance->li_file = file;
1469 if ((flags & LOP_EXCLUSIVE) != 0)
1470 instance->li_flags = LI_EXCLUSIVE;
1471 else
1472 instance->li_flags = 0;
1473 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1474 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1475 }
1476
1477 void
1478 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1479 {
1480 struct lock_instance *instance;
1481 struct lock_class *class;
1482
1483 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1484 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1485 return;
1486 class = LOCK_CLASS(lock);
1487 if (witness_watch) {
1488 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1489 kassert_panic(
1490 "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1491 class->lc_name, lock->lo_name,
1492 fixup_filename(file), line);
1493 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1494 kassert_panic(
1495 "upgrade of non-sleep lock (%s) %s @ %s:%d",
1496 class->lc_name, lock->lo_name,
1497 fixup_filename(file), line);
1498 }
1499 instance = find_instance(curthread->td_sleeplocks, lock);
1500 if (instance == NULL) {
1501 kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1502 class->lc_name, lock->lo_name,
1503 fixup_filename(file), line);
1504 return;
1505 }
1506 if (witness_watch) {
1507 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1508 kassert_panic(
1509 "upgrade of exclusive lock (%s) %s @ %s:%d",
1510 class->lc_name, lock->lo_name,
1511 fixup_filename(file), line);
1512 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1513 kassert_panic(
1514 "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1515 class->lc_name, lock->lo_name,
1516 instance->li_flags & LI_RECURSEMASK,
1517 fixup_filename(file), line);
1518 }
1519 instance->li_flags |= LI_EXCLUSIVE;
1520 }
1521
1522 void
1523 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1524 int line)
1525 {
1526 struct lock_instance *instance;
1527 struct lock_class *class;
1528
1529 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1530 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1531 return;
1532 class = LOCK_CLASS(lock);
1533 if (witness_watch) {
1534 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1535 kassert_panic(
1536 "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1537 class->lc_name, lock->lo_name,
1538 fixup_filename(file), line);
1539 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1540 kassert_panic(
1541 "downgrade of non-sleep lock (%s) %s @ %s:%d",
1542 class->lc_name, lock->lo_name,
1543 fixup_filename(file), line);
1544 }
1545 instance = find_instance(curthread->td_sleeplocks, lock);
1546 if (instance == NULL) {
1547 kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1548 class->lc_name, lock->lo_name,
1549 fixup_filename(file), line);
1550 return;
1551 }
1552 if (witness_watch) {
1553 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1554 kassert_panic(
1555 "downgrade of shared lock (%s) %s @ %s:%d",
1556 class->lc_name, lock->lo_name,
1557 fixup_filename(file), line);
1558 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1559 kassert_panic(
1560 "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1561 class->lc_name, lock->lo_name,
1562 instance->li_flags & LI_RECURSEMASK,
1563 fixup_filename(file), line);
1564 }
1565 instance->li_flags &= ~LI_EXCLUSIVE;
1566 }
1567
1568 void
1569 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1570 {
1571 struct lock_list_entry **lock_list, *lle;
1572 struct lock_instance *instance;
1573 struct lock_class *class;
1574 struct thread *td;
1575 register_t s;
1576 int i, j;
1577
1578 if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1579 return;
1580 td = curthread;
1581 class = LOCK_CLASS(lock);
1582
1583 /* Find lock instance associated with this lock. */
1584 if (class->lc_flags & LC_SLEEPLOCK)
1585 lock_list = &td->td_sleeplocks;
1586 else
1587 lock_list = PCPU_PTR(spinlocks);
1588 lle = *lock_list;
1589 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1590 for (i = 0; i < (*lock_list)->ll_count; i++) {
1591 instance = &(*lock_list)->ll_children[i];
1592 if (instance->li_lock == lock)
1593 goto found;
1594 }
1595
1596 /*
1597 	 * When WITNESS is disabled via witness_watch, previously registered
1598 	 * locks may still be sitting in the td_sleeplocks queue.
1599 	 * Those queues must be flushed, so just search for any such leftover
1600 	 * locks and remove them.
1601 */
1602 if (witness_watch > 0) {
1603 kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1604 lock->lo_name, fixup_filename(file), line);
1605 return;
1606 } else {
1607 return;
1608 }
1609 found:
1610
1611 /* First, check for shared/exclusive mismatches. */
1612 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1613 (flags & LOP_EXCLUSIVE) == 0) {
1614 witness_output("shared unlock of (%s) %s @ %s:%d\n",
1615 class->lc_name, lock->lo_name, fixup_filename(file), line);
1616 witness_output("while exclusively locked from %s:%d\n",
1617 fixup_filename(instance->li_file), instance->li_line);
1618 kassert_panic("excl->ushare");
1619 }
1620 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1621 (flags & LOP_EXCLUSIVE) != 0) {
1622 witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
1623 class->lc_name, lock->lo_name, fixup_filename(file), line);
1624 witness_output("while share locked from %s:%d\n",
1625 fixup_filename(instance->li_file),
1626 instance->li_line);
1627 kassert_panic("share->uexcl");
1628 }
1629 /* If we are recursed, unrecurse. */
1630 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1631 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1632 td->td_proc->p_pid, instance->li_lock->lo_name,
1633 instance->li_flags);
1634 instance->li_flags--;
1635 return;
1636 }
1637 /* The lock is now being dropped, check for NORELEASE flag */
1638 if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1639 witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
1640 class->lc_name, lock->lo_name, fixup_filename(file), line);
1641 kassert_panic("lock marked norelease");
1642 }
1643
1644 /* Otherwise, remove this item from the list. */
1645 s = intr_disable();
1646 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1647 td->td_proc->p_pid, instance->li_lock->lo_name,
1648 (*lock_list)->ll_count - 1);
1649 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1650 (*lock_list)->ll_children[j] =
1651 (*lock_list)->ll_children[j + 1];
1652 (*lock_list)->ll_count--;
1653 intr_restore(s);
1654
1655 /*
1656 	 * To reduce contention on w_mtx, we always try to keep a head object
1657 	 * on the lists so that frequent allocation from the free witness
1658 	 * pool (and the subsequent locking) is avoided.
1659 	 * To keep the code simple, an empty head object also implies that
1660 	 * there are no further objects on the list; therefore, if the
1661 	 * current head needs to be freed, list ownership has to be handed
1662 	 * over to another object first.
1663 */
1664 if ((*lock_list)->ll_count == 0) {
1665 if (*lock_list == lle) {
1666 if (lle->ll_next == NULL)
1667 return;
1668 } else
1669 lle = *lock_list;
1670 *lock_list = lle->ll_next;
1671 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1672 td->td_proc->p_pid, lle);
1673 witness_lock_list_free(lle);
1674 }
1675 }
1676
1677 void
1678 witness_thread_exit(struct thread *td)
1679 {
1680 struct lock_list_entry *lle;
1681 int i, n;
1682
1683 lle = td->td_sleeplocks;
1684 if (lle == NULL || panicstr != NULL)
1685 return;
1686 if (lle->ll_count != 0) {
1687 for (n = 0; lle != NULL; lle = lle->ll_next)
1688 for (i = lle->ll_count - 1; i >= 0; i--) {
1689 if (n == 0)
1690 witness_output(
1691 "Thread %p exiting with the following locks held:\n", td);
1692 n++;
1693 witness_list_lock(&lle->ll_children[i],
1694 witness_output);
1695
1696 }
1697 kassert_panic(
1698 "Thread %p cannot exit while holding sleeplocks\n", td);
1699 }
1700 witness_lock_list_free(lle);
1701 }
1702
1703 /*
1704 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1705 * exempt Giant and sleepable locks from the checks as well. If any
1706 * non-exempt locks are held, then a supplied message is printed to the
1707 * output channel along with a list of the offending locks. If indicated in the
1708 * flags then a failure results in a panic as well.
1709 */
1710 int
1711 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1712 {
1713 struct lock_list_entry *lock_list, *lle;
1714 struct lock_instance *lock1;
1715 struct thread *td;
1716 va_list ap;
1717 int i, n;
1718
1719 if (witness_cold || witness_watch < 1 || panicstr != NULL)
1720 return (0);
1721 n = 0;
1722 td = curthread;
1723 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1724 for (i = lle->ll_count - 1; i >= 0; i--) {
1725 lock1 = &lle->ll_children[i];
1726 if (lock1->li_lock == lock)
1727 continue;
1728 if (flags & WARN_GIANTOK &&
1729 lock1->li_lock == &Giant.lock_object)
1730 continue;
1731 if (flags & WARN_SLEEPOK &&
1732 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1733 continue;
1734 if (n == 0) {
1735 va_start(ap, fmt);
1736 vprintf(fmt, ap);
1737 va_end(ap);
1738 printf(" with the following %slocks held:\n",
1739 (flags & WARN_SLEEPOK) != 0 ?
1740 "non-sleepable " : "");
1741 }
1742 n++;
1743 witness_list_lock(lock1, printf);
1744 }
1745
	/*
	 * Pin the thread to avoid problems with thread migration.  Once
	 * all the checks of spin lock ownership have passed, the thread is
	 * on a safe path and can be unpinned.
	 */
1751 sched_pin();
1752 lock_list = PCPU_GET(spinlocks);
1753 if (lock_list != NULL && lock_list->ll_count != 0) {
1754 sched_unpin();
1755
		/*
		 * None of the flag exemptions can apply to a spin lock's
		 * class, so the only acceptable case is that exactly one
		 * spin lock is held and it is the lock curthread is
		 * expected to hold.
		 */
1762 lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1763 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1764 lock1->li_lock == lock && n == 0)
1765 return (0);
1766
1767 va_start(ap, fmt);
1768 vprintf(fmt, ap);
1769 va_end(ap);
1770 printf(" with the following %slocks held:\n",
1771 (flags & WARN_SLEEPOK) != 0 ? "non-sleepable " : "");
1772 n += witness_list_locks(&lock_list, printf);
1773 } else
1774 sched_unpin();
1775 if (flags & WARN_PANIC && n)
1776 kassert_panic("%s", __func__);
1777 else
1778 witness_debugger(n, __func__);
1779 return (n);
1780 }
1781
1782 const char *
1783 witness_file(struct lock_object *lock)
1784 {
1785 struct witness *w;
1786
1787 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1788 return ("?");
1789 w = lock->lo_witness;
1790 return (w->w_file);
1791 }
1792
1793 int
1794 witness_line(struct lock_object *lock)
1795 {
1796 struct witness *w;
1797
1798 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1799 return (0);
1800 w = lock->lo_witness;
1801 return (w->w_line);
1802 }
1803
1804 static struct witness *
1805 enroll(const char *description, struct lock_class *lock_class)
1806 {
1807 struct witness *w;
1808 struct witness_list *typelist;
1809
1810 MPASS(description != NULL);
1811
1812 if (witness_watch == -1 || panicstr != NULL)
1813 return (NULL);
1814 if ((lock_class->lc_flags & LC_SPINLOCK)) {
1815 if (witness_skipspin)
1816 return (NULL);
1817 else
1818 typelist = &w_spin;
1819 } else if ((lock_class->lc_flags & LC_SLEEPLOCK)) {
1820 typelist = &w_sleep;
1821 } else {
1822 kassert_panic("lock class %s is not sleep or spin",
1823 lock_class->lc_name);
1824 return (NULL);
1825 }
1826
1827 mtx_lock_spin(&w_mtx);
1828 w = witness_hash_get(description);
1829 if (w)
1830 goto found;
1831 if ((w = witness_get()) == NULL)
1832 return (NULL);
1833 MPASS(strlen(description) < MAX_W_NAME);
1834 strcpy(w->w_name, description);
1835 w->w_class = lock_class;
1836 w->w_refcount = 1;
1837 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1838 if (lock_class->lc_flags & LC_SPINLOCK) {
1839 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1840 w_spin_cnt++;
1841 } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1842 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1843 w_sleep_cnt++;
1844 }
1845
1846 /* Insert new witness into the hash */
1847 witness_hash_put(w);
1848 witness_increment_graph_generation();
1849 mtx_unlock_spin(&w_mtx);
1850 return (w);
1851 found:
1852 w->w_refcount++;
1853 mtx_unlock_spin(&w_mtx);
1854 if (lock_class != w->w_class)
1855 kassert_panic(
1856 "lock (%s) %s does not match earlier (%s) lock",
1857 description, lock_class->lc_name,
1858 w->w_class->lc_name);
1859 return (w);
1860 }
1861
1862 static void
1863 depart(struct witness *w)
1864 {
1865 struct witness_list *list;
1866
1867 MPASS(w->w_refcount == 0);
1868 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1869 list = &w_sleep;
1870 w_sleep_cnt--;
1871 } else {
1872 list = &w_spin;
1873 w_spin_cnt--;
1874 }
1875 /*
1876 * Set file to NULL as it may point into a loadable module.
1877 */
1878 w->w_file = NULL;
1879 w->w_line = 0;
1880 witness_increment_graph_generation();
1881 }
1882
1883
1884 static void
1885 adopt(struct witness *parent, struct witness *child)
1886 {
1887 int pi, ci, i, j;
1888
1889 if (witness_cold == 0)
1890 mtx_assert(&w_mtx, MA_OWNED);
1891
1892 /* If the relationship is already known, there's no work to be done. */
1893 if (isitmychild(parent, child))
1894 return;
1895
1896 /* When the structure of the graph changes, bump up the generation. */
1897 witness_increment_graph_generation();
1898
1899 /*
1900 * The hard part ... create the direct relationship, then propagate all
1901 * indirect relationships.
1902 */
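	/*
	 * Illustrative example (hypothetical lock names): suppose "A" is
	 * already recorded as an ancestor of "B" and adopt() is now called
	 * with parent B and child C.  The code below first records the
	 * direct B -> C edge in w_rmatrix, and the nested loops then also
	 * mark A as an ancestor of C (and C as a descendant of A), so that
	 * a later attempt to acquire A while holding C can be flagged as a
	 * lock order reversal.
	 */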
1903 pi = parent->w_index;
1904 ci = child->w_index;
1905 WITNESS_INDEX_ASSERT(pi);
1906 WITNESS_INDEX_ASSERT(ci);
1907 MPASS(pi != ci);
1908 w_rmatrix[pi][ci] |= WITNESS_PARENT;
1909 w_rmatrix[ci][pi] |= WITNESS_CHILD;
1910
1911 /*
1912 * If parent was not already an ancestor of child,
1913 * then we increment the descendant and ancestor counters.
1914 */
1915 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1916 parent->w_num_descendants++;
1917 child->w_num_ancestors++;
1918 }
1919
1920 /*
1921 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1922 * an ancestor of 'pi' during this loop.
1923 */
1924 for (i = 1; i <= w_max_used_index; i++) {
1925 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1926 (i != pi))
1927 continue;
1928
1929 /* Find each descendant of 'i' and mark it as a descendant. */
1930 for (j = 1; j <= w_max_used_index; j++) {
1931
1932 /*
1933 * Skip children that are already marked as
1934 * descendants of 'i'.
1935 */
1936 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1937 continue;
1938
1939 /*
1940 * We are only interested in descendants of 'ci'. Note
1941 * that 'ci' itself is counted as a descendant of 'ci'.
1942 */
1943 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1944 (j != ci))
1945 continue;
1946 w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1947 w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1948 w_data[i].w_num_descendants++;
1949 w_data[j].w_num_ancestors++;
1950
1951 /*
1952 * Make sure we aren't marking a node as both an
1953 * ancestor and descendant. We should have caught
1954 * this as a lock order reversal earlier.
1955 */
1956 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1957 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1958 printf("witness rmatrix paradox! [%d][%d]=%d "
1959 "both ancestor and descendant\n",
1960 i, j, w_rmatrix[i][j]);
1961 kdb_backtrace();
1962 printf("Witness disabled.\n");
1963 witness_watch = -1;
1964 }
1965 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1966 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1967 printf("witness rmatrix paradox! [%d][%d]=%d "
1968 "both ancestor and descendant\n",
1969 j, i, w_rmatrix[j][i]);
1970 kdb_backtrace();
1971 printf("Witness disabled.\n");
1972 witness_watch = -1;
1973 }
1974 }
1975 }
1976 }
1977
1978 static void
1979 itismychild(struct witness *parent, struct witness *child)
1980 {
1981 int unlocked;
1982
1983 MPASS(child != NULL && parent != NULL);
1984 if (witness_cold == 0)
1985 mtx_assert(&w_mtx, MA_OWNED);
1986
1987 if (!witness_lock_type_equal(parent, child)) {
1988 if (witness_cold == 0) {
1989 unlocked = 1;
1990 mtx_unlock_spin(&w_mtx);
1991 } else {
1992 unlocked = 0;
1993 }
1994 kassert_panic(
1995 "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1996 "the same lock type", __func__, parent->w_name,
1997 parent->w_class->lc_name, child->w_name,
1998 child->w_class->lc_name);
1999 if (unlocked)
2000 mtx_lock_spin(&w_mtx);
2001 }
2002 adopt(parent, child);
2003 }
2004
2005 /*
2006 * Generic code for the isitmy*() functions. The rmask parameter is the
2007 * expected relationship of w1 to w2.
2008 */
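/*
 * For example, following the conventions established in adopt() above: if
 * w1 is a direct parent of w2, then w_rmatrix[i1][i2] carries
 * WITNESS_PARENT and w_rmatrix[i2][i1] carries WITNESS_CHILD.  The
 * consistency check below requires the two entries to be each other's
 * ancestor/descendant inverse before the requested rmask is tested.
 */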
2009 static int
2010 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
2011 {
2012 unsigned char r1, r2;
2013 int i1, i2;
2014
2015 i1 = w1->w_index;
2016 i2 = w2->w_index;
2017 WITNESS_INDEX_ASSERT(i1);
2018 WITNESS_INDEX_ASSERT(i2);
2019 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2020 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2021
2022 /* The flags on one better be the inverse of the flags on the other */
2023 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2024 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2025 /* Don't squawk if we're potentially racing with an update. */
2026 if (!mtx_owned(&w_mtx))
2027 return (0);
2028 printf("%s: rmatrix mismatch between %s (index %d) and %s "
2029 "(index %d): w_rmatrix[%d][%d] == %hhx but "
2030 "w_rmatrix[%d][%d] == %hhx\n",
2031 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2032 i2, i1, r2);
2033 kdb_backtrace();
2034 printf("Witness disabled.\n");
2035 witness_watch = -1;
2036 }
2037 return (r1 & rmask);
2038 }
2039
2040 /*
2041 * Checks if @child is a direct child of @parent.
2042 */
2043 static int
2044 isitmychild(struct witness *parent, struct witness *child)
2045 {
2046
2047 return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2048 }
2049
2050 /*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
2052 */
2053 static int
2054 isitmydescendant(struct witness *ancestor, struct witness *descendant)
2055 {
2056
2057 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2058 __func__));
2059 }
2060
2061 #ifdef BLESSING
2062 static int
2063 blessed(struct witness *w1, struct witness *w2)
2064 {
2065 int i;
2066 struct witness_blessed *b;
2067
2068 for (i = 0; i < nitems(blessed_list); i++) {
2069 b = &blessed_list[i];
2070 if (strcmp(w1->w_name, b->b_lock1) == 0) {
2071 if (strcmp(w2->w_name, b->b_lock2) == 0)
2072 return (1);
2073 continue;
2074 }
2075 if (strcmp(w1->w_name, b->b_lock2) == 0)
2076 if (strcmp(w2->w_name, b->b_lock1) == 0)
2077 return (1);
2078 }
2079 return (0);
2080 }
2081 #endif
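/*
 * Hypothetical example of a BLESSING entry (the lock names are made up):
 * adding { "lockA", "lockB" } to blessed_list would make blessed() above
 * return 1 for that pair of witnesses in either order, which the lock
 * order checking code uses to suppress reporting a reversal between them.
 */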
2082
2083 static struct witness *
2084 witness_get(void)
2085 {
2086 struct witness *w;
2087 int index;
2088
2089 if (witness_cold == 0)
2090 mtx_assert(&w_mtx, MA_OWNED);
2091
2092 if (witness_watch == -1) {
2093 mtx_unlock_spin(&w_mtx);
2094 return (NULL);
2095 }
2096 if (STAILQ_EMPTY(&w_free)) {
2097 witness_watch = -1;
2098 mtx_unlock_spin(&w_mtx);
2099 printf("WITNESS: unable to allocate a new witness object\n");
2100 return (NULL);
2101 }
2102 w = STAILQ_FIRST(&w_free);
2103 STAILQ_REMOVE_HEAD(&w_free, w_list);
2104 w_free_cnt--;
2105 index = w->w_index;
2106 MPASS(index > 0 && index == w_max_used_index+1 &&
2107 index < witness_count);
2108 bzero(w, sizeof(*w));
2109 w->w_index = index;
2110 if (index > w_max_used_index)
2111 w_max_used_index = index;
2112 return (w);
2113 }
2114
2115 static void
2116 witness_free(struct witness *w)
2117 {
2118
2119 STAILQ_INSERT_HEAD(&w_free, w, w_list);
2120 w_free_cnt++;
2121 }
2122
2123 static struct lock_list_entry *
2124 witness_lock_list_get(void)
2125 {
2126 struct lock_list_entry *lle;
2127
2128 if (witness_watch == -1)
2129 return (NULL);
2130 mtx_lock_spin(&w_mtx);
2131 lle = w_lock_list_free;
2132 if (lle == NULL) {
2133 witness_watch = -1;
2134 mtx_unlock_spin(&w_mtx);
2135 printf("%s: witness exhausted\n", __func__);
2136 return (NULL);
2137 }
2138 w_lock_list_free = lle->ll_next;
2139 mtx_unlock_spin(&w_mtx);
2140 bzero(lle, sizeof(*lle));
2141 return (lle);
2142 }
2143
2144 static void
2145 witness_lock_list_free(struct lock_list_entry *lle)
2146 {
2147
2148 mtx_lock_spin(&w_mtx);
2149 lle->ll_next = w_lock_list_free;
2150 w_lock_list_free = lle;
2151 mtx_unlock_spin(&w_mtx);
2152 }
2153
2154 static struct lock_instance *
2155 find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2156 {
2157 struct lock_list_entry *lle;
2158 struct lock_instance *instance;
2159 int i;
2160
2161 for (lle = list; lle != NULL; lle = lle->ll_next)
2162 for (i = lle->ll_count - 1; i >= 0; i--) {
2163 instance = &lle->ll_children[i];
2164 if (instance->li_lock == lock)
2165 return (instance);
2166 }
2167 return (NULL);
2168 }
2169
2170 static void
2171 witness_list_lock(struct lock_instance *instance,
2172 int (*prnt)(const char *fmt, ...))
2173 {
2174 struct lock_object *lock;
2175
2176 lock = instance->li_lock;
2177 prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2178 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2179 if (lock->lo_witness->w_name != lock->lo_name)
2180 prnt(" (%s)", lock->lo_witness->w_name);
2181 prnt(" r = %d (%p) locked @ %s:%d\n",
2182 instance->li_flags & LI_RECURSEMASK, lock,
2183 fixup_filename(instance->li_file), instance->li_line);
2184 }
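/*
 * A representative (hypothetical) line produced by witness_list_lock()
 * above would look like the following (wrapped here for readability):
 *
 *	exclusive sleep mutex example_mtx (example witness) r = 0
 *	    (0xdeadc0de) locked @ kern/example_file.c:123
 *
 * The parenthesized witness name only appears when it is not the same as
 * the lock's own name, and "r" is the recursion count.
 */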
2185
2186 static int
2187 witness_output(const char *fmt, ...)
2188 {
2189 va_list ap;
2190 int ret;
2191
2192 va_start(ap, fmt);
2193 ret = witness_voutput(fmt, ap);
2194 va_end(ap);
2195 return (ret);
2196 }
2197
2198 static int
2199 witness_voutput(const char *fmt, va_list ap)
2200 {
2201 int ret;
2202
2203 ret = 0;
2204 switch (witness_channel) {
2205 case WITNESS_CONSOLE:
2206 ret = vprintf(fmt, ap);
2207 break;
2208 case WITNESS_LOG:
2209 vlog(LOG_NOTICE, fmt, ap);
2210 break;
2211 case WITNESS_NONE:
2212 break;
2213 }
2214 return (ret);
2215 }
2216
2217 #ifdef DDB
2218 static int
2219 witness_thread_has_locks(struct thread *td)
2220 {
2221
2222 if (td->td_sleeplocks == NULL)
2223 return (0);
2224 return (td->td_sleeplocks->ll_count != 0);
2225 }
2226
2227 static int
2228 witness_proc_has_locks(struct proc *p)
2229 {
2230 struct thread *td;
2231
2232 FOREACH_THREAD_IN_PROC(p, td) {
2233 if (witness_thread_has_locks(td))
2234 return (1);
2235 }
2236 return (0);
2237 }
2238 #endif
2239
2240 int
2241 witness_list_locks(struct lock_list_entry **lock_list,
2242 int (*prnt)(const char *fmt, ...))
2243 {
2244 struct lock_list_entry *lle;
2245 int i, nheld;
2246
2247 nheld = 0;
2248 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2249 for (i = lle->ll_count - 1; i >= 0; i--) {
2250 witness_list_lock(&lle->ll_children[i], prnt);
2251 nheld++;
2252 }
2253 return (nheld);
2254 }
2255
2256 /*
2257 * This is a bit risky at best. We call this function when we have timed
2258 * out acquiring a spin lock, and we assume that the other CPU is stuck
2259 * with this lock held. So, we go groveling around in the other CPU's
2260 * per-cpu data to try to find the lock instance for this spin lock to
2261 * see when it was last acquired.
2262 */
2263 void
2264 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2265 int (*prnt)(const char *fmt, ...))
2266 {
2267 struct lock_instance *instance;
2268 struct pcpu *pc;
2269
2270 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2271 return;
2272 pc = pcpu_find(owner->td_oncpu);
2273 instance = find_instance(pc->pc_spinlocks, lock);
2274 if (instance != NULL)
2275 witness_list_lock(instance, prnt);
2276 }
2277
2278 void
2279 witness_save(struct lock_object *lock, const char **filep, int *linep)
2280 {
2281 struct lock_list_entry *lock_list;
2282 struct lock_instance *instance;
2283 struct lock_class *class;
2284
	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is gone.
	 */
2290 if (SCHEDULER_STOPPED())
2291 return;
2292 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2293 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2294 return;
2295 class = LOCK_CLASS(lock);
2296 if (class->lc_flags & LC_SLEEPLOCK)
2297 lock_list = curthread->td_sleeplocks;
2298 else {
2299 if (witness_skipspin)
2300 return;
2301 lock_list = PCPU_GET(spinlocks);
2302 }
2303 instance = find_instance(lock_list, lock);
2304 if (instance == NULL) {
2305 kassert_panic("%s: lock (%s) %s not locked", __func__,
2306 class->lc_name, lock->lo_name);
2307 return;
2308 }
2309 *filep = instance->li_file;
2310 *linep = instance->li_line;
2311 }
2312
2313 void
2314 witness_restore(struct lock_object *lock, const char *file, int line)
2315 {
2316 struct lock_list_entry *lock_list;
2317 struct lock_instance *instance;
2318 struct lock_class *class;
2319
	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is gone.
	 */
2325 if (SCHEDULER_STOPPED())
2326 return;
2327 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2328 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2329 return;
2330 class = LOCK_CLASS(lock);
2331 if (class->lc_flags & LC_SLEEPLOCK)
2332 lock_list = curthread->td_sleeplocks;
2333 else {
2334 if (witness_skipspin)
2335 return;
2336 lock_list = PCPU_GET(spinlocks);
2337 }
2338 instance = find_instance(lock_list, lock);
2339 if (instance == NULL)
2340 kassert_panic("%s: lock (%s) %s not locked", __func__,
2341 class->lc_name, lock->lo_name);
2342 lock->lo_witness->w_file = file;
2343 lock->lo_witness->w_line = line;
2344 if (instance == NULL)
2345 return;
2346 instance->li_file = file;
2347 instance->li_line = line;
2348 }
2349
2350 void
2351 witness_assert(const struct lock_object *lock, int flags, const char *file,
2352 int line)
2353 {
2354 #ifdef INVARIANT_SUPPORT
2355 struct lock_instance *instance;
2356 struct lock_class *class;
2357
2358 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2359 return;
2360 class = LOCK_CLASS(lock);
2361 if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2362 instance = find_instance(curthread->td_sleeplocks, lock);
2363 else if ((class->lc_flags & LC_SPINLOCK) != 0)
2364 instance = find_instance(PCPU_GET(spinlocks), lock);
2365 else {
2366 kassert_panic("Lock (%s) %s is not sleep or spin!",
2367 class->lc_name, lock->lo_name);
2368 return;
2369 }
2370 switch (flags) {
2371 case LA_UNLOCKED:
2372 if (instance != NULL)
2373 kassert_panic("Lock (%s) %s locked @ %s:%d.",
2374 class->lc_name, lock->lo_name,
2375 fixup_filename(file), line);
2376 break;
2377 case LA_LOCKED:
2378 case LA_LOCKED | LA_RECURSED:
2379 case LA_LOCKED | LA_NOTRECURSED:
2380 case LA_SLOCKED:
2381 case LA_SLOCKED | LA_RECURSED:
2382 case LA_SLOCKED | LA_NOTRECURSED:
2383 case LA_XLOCKED:
2384 case LA_XLOCKED | LA_RECURSED:
2385 case LA_XLOCKED | LA_NOTRECURSED:
2386 if (instance == NULL) {
2387 kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2388 class->lc_name, lock->lo_name,
2389 fixup_filename(file), line);
2390 break;
2391 }
2392 if ((flags & LA_XLOCKED) != 0 &&
2393 (instance->li_flags & LI_EXCLUSIVE) == 0)
2394 kassert_panic(
2395 "Lock (%s) %s not exclusively locked @ %s:%d.",
2396 class->lc_name, lock->lo_name,
2397 fixup_filename(file), line);
2398 if ((flags & LA_SLOCKED) != 0 &&
2399 (instance->li_flags & LI_EXCLUSIVE) != 0)
2400 kassert_panic(
2401 "Lock (%s) %s exclusively locked @ %s:%d.",
2402 class->lc_name, lock->lo_name,
2403 fixup_filename(file), line);
2404 if ((flags & LA_RECURSED) != 0 &&
2405 (instance->li_flags & LI_RECURSEMASK) == 0)
2406 kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2407 class->lc_name, lock->lo_name,
2408 fixup_filename(file), line);
2409 if ((flags & LA_NOTRECURSED) != 0 &&
2410 (instance->li_flags & LI_RECURSEMASK) != 0)
2411 kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2412 class->lc_name, lock->lo_name,
2413 fixup_filename(file), line);
2414 break;
2415 default:
2416 kassert_panic("Invalid lock assertion at %s:%d.",
2417 fixup_filename(file), line);
2418
2419 }
2420 #endif /* INVARIANT_SUPPORT */
2421 }
2422
2423 static void
2424 witness_setflag(struct lock_object *lock, int flag, int set)
2425 {
2426 struct lock_list_entry *lock_list;
2427 struct lock_instance *instance;
2428 struct lock_class *class;
2429
2430 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2431 return;
2432 class = LOCK_CLASS(lock);
2433 if (class->lc_flags & LC_SLEEPLOCK)
2434 lock_list = curthread->td_sleeplocks;
2435 else {
2436 if (witness_skipspin)
2437 return;
2438 lock_list = PCPU_GET(spinlocks);
2439 }
2440 instance = find_instance(lock_list, lock);
2441 if (instance == NULL) {
2442 kassert_panic("%s: lock (%s) %s not locked", __func__,
2443 class->lc_name, lock->lo_name);
2444 return;
2445 }
2446
2447 if (set)
2448 instance->li_flags |= flag;
2449 else
2450 instance->li_flags &= ~flag;
2451 }
2452
2453 void
2454 witness_norelease(struct lock_object *lock)
2455 {
2456
2457 witness_setflag(lock, LI_NORELEASE, 1);
2458 }
2459
2460 void
2461 witness_releaseok(struct lock_object *lock)
2462 {
2463
2464 witness_setflag(lock, LI_NORELEASE, 0);
2465 }
2466
2467 #ifdef DDB
2468 static void
2469 witness_ddb_list(struct thread *td)
2470 {
2471
2472 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2473 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2474
2475 if (witness_watch < 1)
2476 return;
2477
2478 witness_list_locks(&td->td_sleeplocks, db_printf);
2479
2480 /*
2481 * We only handle spinlocks if td == curthread. This is somewhat broken
2482 * if td is currently executing on some other CPU and holds spin locks
2483 * as we won't display those locks. If we had a MI way of getting
2484 * the per-cpu data for a given cpu then we could use
2485 * td->td_oncpu to get the list of spinlocks for this thread
2486 * and "fix" this.
2487 *
2488 * That still wouldn't really fix this unless we locked the scheduler
2489 * lock or stopped the other CPU to make sure it wasn't changing the
2490 * list out from under us. It is probably best to just not try to
2491 * handle threads on other CPU's for now.
2492 */
2493 if (td == curthread && PCPU_GET(spinlocks) != NULL)
2494 witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2495 }
2496
2497 DB_SHOW_COMMAND(locks, db_witness_list)
2498 {
2499 struct thread *td;
2500
2501 if (have_addr)
2502 td = db_lookup_thread(addr, true);
2503 else
2504 td = kdb_thread;
2505 witness_ddb_list(td);
2506 }
2507
2508 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2509 {
2510 struct thread *td;
2511 struct proc *p;
2512
2513 /*
2514 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
2516 * by WITNESS.
2517 */
2518 FOREACH_PROC_IN_SYSTEM(p) {
2519 if (!witness_proc_has_locks(p))
2520 continue;
2521 FOREACH_THREAD_IN_PROC(p, td) {
2522 if (!witness_thread_has_locks(td))
2523 continue;
2524 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2525 p->p_comm, td, td->td_tid);
2526 witness_ddb_list(td);
2527 if (db_pager_quit)
2528 return;
2529 }
2530 }
2531 }
2532 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2533
2534 DB_SHOW_COMMAND(witness, db_witness_display)
2535 {
2536
2537 witness_ddb_display(db_printf);
2538 }
2539 #endif
2540
2541 static int
2542 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2543 {
2544 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2545 struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2546 struct sbuf *sb;
2547 u_int w_rmatrix1, w_rmatrix2;
2548 int error, generation, i, j;
2549
2550 tmp_data1 = NULL;
2551 tmp_data2 = NULL;
2552 tmp_w1 = NULL;
2553 tmp_w2 = NULL;
2554 if (witness_watch < 1) {
2555 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2556 return (error);
2557 }
2558 if (witness_cold) {
2559 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2560 return (error);
2561 }
2562 error = 0;
2563 sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2564 if (sb == NULL)
2565 return (ENOMEM);
2566
2567 /* Allocate and init temporary storage space. */
2568 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2569 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2570 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2571 M_WAITOK | M_ZERO);
2572 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2573 M_WAITOK | M_ZERO);
2574 stack_zero(&tmp_data1->wlod_stack);
2575 stack_zero(&tmp_data2->wlod_stack);
2576
2577 restart:
2578 mtx_lock_spin(&w_mtx);
2579 generation = w_generation;
2580 mtx_unlock_spin(&w_mtx);
2581 sbuf_printf(sb, "Number of known direct relationships is %d\n",
2582 w_lohash.wloh_count);
2583 for (i = 1; i < w_max_used_index; i++) {
2584 mtx_lock_spin(&w_mtx);
2585 if (generation != w_generation) {
2586 mtx_unlock_spin(&w_mtx);
2587
2588 /* The graph has changed, try again. */
2589 req->oldidx = 0;
2590 sbuf_clear(sb);
2591 goto restart;
2592 }
2593
2594 w1 = &w_data[i];
2595 if (w1->w_reversed == 0) {
2596 mtx_unlock_spin(&w_mtx);
2597 continue;
2598 }
2599
2600 /* Copy w1 locally so we can release the spin lock. */
2601 *tmp_w1 = *w1;
2602 mtx_unlock_spin(&w_mtx);
2603
2604 if (tmp_w1->w_reversed == 0)
2605 continue;
2606 for (j = 1; j < w_max_used_index; j++) {
2607 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2608 continue;
2609
2610 mtx_lock_spin(&w_mtx);
2611 if (generation != w_generation) {
2612 mtx_unlock_spin(&w_mtx);
2613
2614 /* The graph has changed, try again. */
2615 req->oldidx = 0;
2616 sbuf_clear(sb);
2617 goto restart;
2618 }
2619
2620 w2 = &w_data[j];
2621 data1 = witness_lock_order_get(w1, w2);
2622 data2 = witness_lock_order_get(w2, w1);
2623
2624 /*
2625 * Copy information locally so we can release the
2626 * spin lock.
2627 */
2628 *tmp_w2 = *w2;
2629 w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2630 w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2631
2632 if (data1) {
2633 stack_zero(&tmp_data1->wlod_stack);
2634 stack_copy(&data1->wlod_stack,
2635 &tmp_data1->wlod_stack);
2636 }
2637 if (data2 && data2 != data1) {
2638 stack_zero(&tmp_data2->wlod_stack);
2639 stack_copy(&data2->wlod_stack,
2640 &tmp_data2->wlod_stack);
2641 }
2642 mtx_unlock_spin(&w_mtx);
2643
2644 sbuf_printf(sb,
2645 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2646 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2647 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2648 if (data1) {
2649 sbuf_printf(sb,
2650 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2651 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2652 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2653 stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2654 sbuf_printf(sb, "\n");
2655 }
2656 if (data2 && data2 != data1) {
2657 sbuf_printf(sb,
2658 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2659 tmp_w2->w_name, tmp_w2->w_class->lc_name,
2660 tmp_w1->w_name, tmp_w1->w_class->lc_name);
2661 stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2662 sbuf_printf(sb, "\n");
2663 }
2664 }
2665 }
2666 mtx_lock_spin(&w_mtx);
2667 if (generation != w_generation) {
2668 mtx_unlock_spin(&w_mtx);
2669
2670 /*
2671 * The graph changed while we were printing stack data,
2672 * try again.
2673 */
2674 req->oldidx = 0;
2675 sbuf_clear(sb);
2676 goto restart;
2677 }
2678 mtx_unlock_spin(&w_mtx);
2679
2680 /* Free temporary storage space. */
2681 free(tmp_data1, M_TEMP);
2682 free(tmp_data2, M_TEMP);
2683 free(tmp_w1, M_TEMP);
2684 free(tmp_w2, M_TEMP);
2685
2686 sbuf_finish(sb);
2687 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2688 sbuf_delete(sb);
2689
2690 return (error);
2691 }
2692
2693 static int
2694 sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
2695 {
2696 static const struct {
2697 enum witness_channel channel;
2698 const char *name;
2699 } channels[] = {
2700 { WITNESS_CONSOLE, "console" },
2701 { WITNESS_LOG, "log" },
2702 { WITNESS_NONE, "none" },
2703 };
2704 char buf[16];
2705 u_int i;
2706 int error;
2707
2708 buf[0] = '\0';
2709 for (i = 0; i < nitems(channels); i++)
2710 if (witness_channel == channels[i].channel) {
2711 snprintf(buf, sizeof(buf), "%s", channels[i].name);
2712 break;
2713 }
2714
2715 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2716 if (error != 0 || req->newptr == NULL)
2717 return (error);
2718
2719 error = EINVAL;
2720 for (i = 0; i < nitems(channels); i++)
2721 if (strcmp(channels[i].name, buf) == 0) {
2722 witness_channel = channels[i].channel;
2723 error = 0;
2724 break;
2725 }
2726 return (error);
2727 }
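/*
 * Usage sketch, assuming this handler is attached to the
 * debug.witness.output_channel sysctl (the OID name is an assumption, not
 * shown in this excerpt): an administrator could redirect witness output
 * to the kernel log with
 *
 *	sysctl debug.witness.output_channel=log
 *
 * "console", "log" and "none" are the accepted values, per the channels[]
 * table above.
 */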
2728
2729 static int
2730 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2731 {
2732 struct witness *w;
2733 struct sbuf *sb;
2734 int error;
2735
2736 if (witness_watch < 1) {
2737 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2738 return (error);
2739 }
2740 if (witness_cold) {
2741 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2742 return (error);
2743 }
2744 error = 0;
2745
2746 error = sysctl_wire_old_buffer(req, 0);
2747 if (error != 0)
2748 return (error);
2749 sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2750 if (sb == NULL)
2751 return (ENOMEM);
2752 sbuf_printf(sb, "\n");
2753
2754 mtx_lock_spin(&w_mtx);
2755 STAILQ_FOREACH(w, &w_all, w_list)
2756 w->w_displayed = 0;
2757 STAILQ_FOREACH(w, &w_all, w_list)
2758 witness_add_fullgraph(sb, w);
2759 mtx_unlock_spin(&w_mtx);
2760
2761 /*
2762 * Close the sbuf and return to userland.
2763 */
2764 error = sbuf_finish(sb);
2765 sbuf_delete(sb);
2766
2767 return (error);
2768 }
2769
2770 static int
2771 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2772 {
2773 int error, value;
2774
2775 value = witness_watch;
2776 error = sysctl_handle_int(oidp, &value, 0, req);
2777 if (error != 0 || req->newptr == NULL)
2778 return (error);
2779 if (value > 1 || value < -1 ||
2780 (witness_watch == -1 && value != witness_watch))
2781 return (EINVAL);
2782 witness_watch = value;
2783 return (0);
2784 }
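/*
 * Usage sketch, assuming this handler backs the debug.witness.watch sysctl
 * (the OID name is an assumption, not shown in this excerpt):
 *
 *	sysctl debug.witness.watch=-1
 *
 * Only values in the range -1..1 are accepted, and once witness_watch has
 * been set to -1 any attempt to change it again is rejected with EINVAL,
 * as enforced by the checks above.
 */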
2785
2786 static void
2787 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2788 {
2789 int i;
2790
2791 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2792 return;
2793 w->w_displayed = 1;
2794
2795 WITNESS_INDEX_ASSERT(w->w_index);
2796 for (i = 1; i <= w_max_used_index; i++) {
2797 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2798 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2799 w_data[i].w_name);
2800 witness_add_fullgraph(sb, &w_data[i]);
2801 }
2802 }
2803 }
2804
2805 /*
2806 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2807 * interprets the key as a string and reads until the null
2808 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2809 * hash value computed from the key.
2810 */
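/*
 * Worked example (illustrative): hashing the two-character string "ab"
 * starts with hash = 5381, then
 *
 *	hash = 5381 * 33 + 'a' (97)   = 177670
 *	hash = 177670 * 33 + 'b' (98) = 5863208
 *
 * so witness_hash_djb2("ab", 0) returns 5863208, before the callers reduce
 * it modulo their table size.
 */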
2811 static uint32_t
2812 witness_hash_djb2(const uint8_t *key, uint32_t size)
2813 {
2814 unsigned int hash = 5381;
2815 int i;
2816
2817 /* hash = hash * 33 + key[i] */
2818 if (size)
2819 for (i = 0; i < size; i++)
2820 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2821 else
2822 for (i = 0; key[i] != 0; i++)
2823 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2824
2825 return (hash);
2826 }
2827
2828
2829 /*
2830 * Initializes the two witness hash tables. Called exactly once from
2831 * witness_initialize().
2832 */
2833 static void
2834 witness_init_hash_tables(void)
2835 {
2836 int i;
2837
2838 MPASS(witness_cold);
2839
2840 /* Initialize the hash tables. */
2841 for (i = 0; i < WITNESS_HASH_SIZE; i++)
2842 w_hash.wh_array[i] = NULL;
2843
2844 w_hash.wh_size = WITNESS_HASH_SIZE;
2845 w_hash.wh_count = 0;
2846
2847 /* Initialize the lock order data hash. */
2848 w_lofree = NULL;
2849 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2850 memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2851 w_lodata[i].wlod_next = w_lofree;
2852 w_lofree = &w_lodata[i];
2853 }
2854 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2855 w_lohash.wloh_count = 0;
2856 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2857 w_lohash.wloh_array[i] = NULL;
2858 }
2859
2860 static struct witness *
2861 witness_hash_get(const char *key)
2862 {
2863 struct witness *w;
2864 uint32_t hash;
2865
2866 MPASS(key != NULL);
2867 if (witness_cold == 0)
2868 mtx_assert(&w_mtx, MA_OWNED);
2869 hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2870 w = w_hash.wh_array[hash];
2871 while (w != NULL) {
2872 if (strcmp(w->w_name, key) == 0)
2873 goto out;
2874 w = w->w_hash_next;
2875 }
2876
2877 out:
2878 return (w);
2879 }
2880
2881 static void
2882 witness_hash_put(struct witness *w)
2883 {
2884 uint32_t hash;
2885
2886 MPASS(w != NULL);
2887 MPASS(w->w_name != NULL);
2888 if (witness_cold == 0)
2889 mtx_assert(&w_mtx, MA_OWNED);
2890 KASSERT(witness_hash_get(w->w_name) == NULL,
2891 ("%s: trying to add a hash entry that already exists!", __func__));
2892 KASSERT(w->w_hash_next == NULL,
2893 ("%s: w->w_hash_next != NULL", __func__));
2894
2895 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2896 w->w_hash_next = w_hash.wh_array[hash];
2897 w_hash.wh_array[hash] = w;
2898 w_hash.wh_count++;
2899 }
2900
2901
2902 static struct witness_lock_order_data *
2903 witness_lock_order_get(struct witness *parent, struct witness *child)
2904 {
2905 struct witness_lock_order_data *data = NULL;
2906 struct witness_lock_order_key key;
2907 unsigned int hash;
2908
2909 MPASS(parent != NULL && child != NULL);
2910 key.from = parent->w_index;
2911 key.to = child->w_index;
2912 WITNESS_INDEX_ASSERT(key.from);
2913 WITNESS_INDEX_ASSERT(key.to);
2914 if ((w_rmatrix[parent->w_index][child->w_index]
2915 & WITNESS_LOCK_ORDER_KNOWN) == 0)
2916 goto out;
2917
2918 hash = witness_hash_djb2((const char*)&key,
2919 sizeof(key)) % w_lohash.wloh_size;
2920 data = w_lohash.wloh_array[hash];
2921 while (data != NULL) {
2922 if (witness_lock_order_key_equal(&data->wlod_key, &key))
2923 break;
2924 data = data->wlod_next;
2925 }
2926
2927 out:
2928 return (data);
2929 }
2930
2931 /*
2932 * Verify that parent and child have a known relationship, are not the same,
2933 * and child is actually a child of parent. This is done without w_mtx
2934 * to avoid contention in the common case.
2935 */
2936 static int
2937 witness_lock_order_check(struct witness *parent, struct witness *child)
2938 {
2939
2940 if (parent != child &&
2941 w_rmatrix[parent->w_index][child->w_index]
2942 & WITNESS_LOCK_ORDER_KNOWN &&
2943 isitmychild(parent, child))
2944 return (1);
2945
2946 return (0);
2947 }
2948
2949 static int
2950 witness_lock_order_add(struct witness *parent, struct witness *child)
2951 {
2952 struct witness_lock_order_data *data = NULL;
2953 struct witness_lock_order_key key;
2954 unsigned int hash;
2955
2956 MPASS(parent != NULL && child != NULL);
2957 key.from = parent->w_index;
2958 key.to = child->w_index;
2959 WITNESS_INDEX_ASSERT(key.from);
2960 WITNESS_INDEX_ASSERT(key.to);
2961 if (w_rmatrix[parent->w_index][child->w_index]
2962 & WITNESS_LOCK_ORDER_KNOWN)
2963 return (1);
2964
2965 hash = witness_hash_djb2((const char*)&key,
2966 sizeof(key)) % w_lohash.wloh_size;
2967 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2968 data = w_lofree;
2969 if (data == NULL)
2970 return (0);
2971 w_lofree = data->wlod_next;
2972 data->wlod_next = w_lohash.wloh_array[hash];
2973 data->wlod_key = key;
2974 w_lohash.wloh_array[hash] = data;
2975 w_lohash.wloh_count++;
2976 stack_zero(&data->wlod_stack);
2977 stack_save(&data->wlod_stack);
2978 return (1);
2979 }
2980
2981 /* Call this whenever the structure of the witness graph changes. */
2982 static void
2983 witness_increment_graph_generation(void)
2984 {
2985
2986 if (witness_cold == 0)
2987 mtx_assert(&w_mtx, MA_OWNED);
2988 w_generation++;
2989 }
2990
2991 static int
2992 witness_output_drain(void *arg __unused, const char *data, int len)
2993 {
2994
2995 witness_output("%.*s", len, data);
2996 return (len);
2997 }
2998
2999 static void
3000 witness_debugger(int cond, const char *msg)
3001 {
3002 char buf[32];
3003 struct sbuf sb;
3004 struct stack st;
3005
3006 if (!cond)
3007 return;
3008
3009 if (witness_trace) {
3010 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
3011 sbuf_set_drain(&sb, witness_output_drain, NULL);
3012
3013 stack_zero(&st);
3014 stack_save(&st);
3015 witness_output("stack backtrace:\n");
3016 stack_sbuf_print_ddb(&sb, &st);
3017
3018 sbuf_finish(&sb);
3019 }
3020
3021 #ifdef KDB
3022 if (witness_kdb)
3023 kdb_enter(KDB_WHY_WITNESS, msg);
3024 #endif
3025 }