1 /*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 */
31
32 /*
33 * Implementation of the `witness' lock verifier. Originally implemented for
34 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
35 * classes in FreeBSD.
36 */
37
38 /*
39 * Main Entry: witness
40 * Pronunciation: 'wit-n&s
41 * Function: noun
42 * Etymology: Middle English witnesse, from Old English witnes knowledge,
43 * testimony, witness, from 2wit
44 * Date: before 12th century
45 * 1 : attestation of a fact or event : TESTIMONY
46 * 2 : one that gives evidence; specifically : one who testifies in
47 * a cause or before a judicial tribunal
48 * 3 : one asked to be present at a transaction so as to be able to
49 * testify to its having taken place
50 * 4 : one who has personal knowledge of something
51 * 5 a : something serving as evidence or proof : SIGN
52 * b : public affirmation by word or example of usually
53 * religious faith or conviction <the heroic witness to divine
54 * life -- Pilot>
55 * 6 capitalized : a member of the Jehovah's Witnesses
56 */
57
58 /*
59 * Special rules concerning Giant and lock orders:
60 *
61 * 1) Giant must be acquired before any other mutexes. Stated another way,
62 * no other mutex may be held when Giant is acquired.
63 *
64 * 2) Giant must be released when blocking on a sleepable lock.
65 *
66 * This rule is less obvious, but is a result of Giant providing the same
67 * semantics as spl(). Basically, when a thread sleeps, it must release
68 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
69 * 2).
70 *
71 * 3) Giant may be acquired before or after sleepable locks.
72 *
73 * This rule is also not quite as obvious. Giant may be acquired after
74 * a sleepable lock because it is a non-sleepable lock and non-sleepable
75 * locks may always be acquired while holding a sleepable lock. The second
76 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
77 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
78 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
79 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
80 * execute. Thus, acquiring Giant both before and after a sleepable lock
81 * will not result in a lock order reversal.
82 */
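/*
 * Illustrative sketch (not part of the original file): under rule 3, both
 * orderings below are accepted by witness, assuming "example_sx" is a
 * hypothetical sleepable sx lock that has been initialized elsewhere.
 *
 *	sx_xlock(&example_sx);		(sleepable lock first)
 *	mtx_lock(&Giant);		(taking Giant afterwards is fine)
 *	mtx_unlock(&Giant);
 *	sx_xunlock(&example_sx);
 *
 *	mtx_lock(&Giant);		(Giant first)
 *	sx_xlock(&example_sx);		(a sleepable lock after Giant is
 *					 also fine, per rules 2 and 3)
 *	sx_xunlock(&example_sx);
 *	mtx_unlock(&Giant);
 *
 * Rule 1 still applies: no other mutex may be held at the point where
 * Giant itself is acquired.
 */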
83
84 #include <sys/cdefs.h>
85 __FBSDID("$FreeBSD: releng/6.0/sys/kern/subr_witness.c 151667 2005-10-25 20:17:11Z jhb $");
86
87 #include "opt_ddb.h"
88 #include "opt_witness.h"
89
90 #include <sys/param.h>
91 #include <sys/bus.h>
92 #include <sys/kdb.h>
93 #include <sys/kernel.h>
94 #include <sys/ktr.h>
95 #include <sys/lock.h>
96 #include <sys/malloc.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/sysctl.h>
100 #include <sys/systm.h>
101
102 #include <ddb/ddb.h>
103
104 #include <machine/stdarg.h>
105
106 /* Define this to check for blessed mutexes */
107 #undef BLESSING
108
109 #define WITNESS_COUNT 1024
110 #define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
111 /*
112 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
113 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should
114 * probably be safe for the most part, but it's still a SWAG.
115 */
116 #define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)
117
118 #define WITNESS_NCHILDREN 6
119
120 struct witness_child_list_entry;
121
122 struct witness {
123 const char *w_name;
124 struct lock_class *w_class;
125 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
126 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
127 struct witness_child_list_entry *w_children; /* Great evilness... */
128 const char *w_file;
129 int w_line;
130 u_int w_level;
131 u_int w_refcount;
132 u_char w_Giant_squawked:1;
133 u_char w_other_squawked:1;
134 u_char w_same_squawked:1;
135 u_char w_displayed:1;
136 };
137
138 struct witness_child_list_entry {
139 struct witness_child_list_entry *wcl_next;
140 struct witness *wcl_children[WITNESS_NCHILDREN];
141 u_int wcl_count;
142 };
143
144 STAILQ_HEAD(witness_list, witness);
145
146 #ifdef BLESSING
147 struct witness_blessed {
148 const char *b_lock1;
149 const char *b_lock2;
150 };
151 #endif
152
153 struct witness_order_list_entry {
154 const char *w_name;
155 struct lock_class *w_class;
156 };
157
158 #ifdef BLESSING
159 static int blessed(struct witness *, struct witness *);
160 #endif
161 static int depart(struct witness *w);
162 static struct witness *enroll(const char *description,
163 struct lock_class *lock_class);
164 static int insertchild(struct witness *parent, struct witness *child);
165 static int isitmychild(struct witness *parent, struct witness *child);
166 static int isitmydescendant(struct witness *parent, struct witness *child);
167 static int itismychild(struct witness *parent, struct witness *child);
168 static int rebalancetree(struct witness_list *list);
169 static void removechild(struct witness *parent, struct witness *child);
170 static int reparentchildren(struct witness *newparent,
171 struct witness *oldparent);
172 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
173 static void witness_displaydescendants(void(*)(const char *fmt, ...),
174 struct witness *, int indent);
175 static const char *fixup_filename(const char *file);
176 static void witness_leveldescendents(struct witness *parent, int level);
177 static void witness_levelall(void);
178 static struct witness *witness_get(void);
179 static void witness_free(struct witness *m);
180 static struct witness_child_list_entry *witness_child_get(void);
181 static void witness_child_free(struct witness_child_list_entry *wcl);
182 static struct lock_list_entry *witness_lock_list_get(void);
183 static void witness_lock_list_free(struct lock_list_entry *lle);
184 static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
185 struct lock_object *lock);
186 static void witness_list_lock(struct lock_instance *instance);
187 #ifdef DDB
188 static void witness_list(struct thread *td);
189 static void witness_display_list(void(*prnt)(const char *fmt, ...),
190 struct witness_list *list);
191 static void witness_display(void(*)(const char *fmt, ...));
192 #endif
193
194 SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");
195
196 /*
197 * If set to 0, witness is disabled. If set to 1, witness performs full lock
198 * order checking for all locks. If set to 2 or higher, then witness skips
199 * the full lock order check if the lock being acquired is at a higher level
200 * (i.e. farther down in the tree) than the current lock. This last mode is
201 * somewhat experimental and not considered fully safe. At runtime, this
202 * value may be set to 0 to turn off witness.  Witness may not be turned
203 * back on once it has been turned off, however.
204 */
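/*
 * Illustrative usage (standard tunable/sysctl mechanics, not part of the
 * original file):
 *
 *	debug.witness.watch="0"		in /boot/loader.conf disables
 *					witness before the kernel boots.
 *	sysctl debug.witness.watch=0	turns witness off at runtime; it
 *					cannot be turned back on afterwards.
 */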
205 static int witness_watch = 1;
206 TUNABLE_INT("debug.witness.watch", &witness_watch);
207 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
208 sysctl_debug_witness_watch, "I", "witness is watching lock operations");
209
210 #ifdef KDB
211 /*
212 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
213 * to drop into the kernel debugger (via kdb_enter()) when:
214 * - a lock hierarchy violation occurs
215 * - locks are held when going to sleep.
216 */
217 #ifdef WITNESS_KDB
218 int witness_kdb = 1;
219 #else
220 int witness_kdb = 0;
221 #endif
222 TUNABLE_INT("debug.witness.kdb", &witness_kdb);
223 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
224
225 /*
226 * When KDB is enabled and witness_trace is set to 1, it will cause the system
227 * to print a stack trace when:
228 * - a lock hierarchy violation occurs
229 * - locks are held when going to sleep.
230 */
231 int witness_trace = 1;
232 TUNABLE_INT("debug.witness.trace", &witness_trace);
233 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
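/*
 * Illustrative usage (standard sysctl mechanics, not part of the original
 * file):
 *
 *	sysctl debug.witness.kdb=1	drop into the debugger on a
 *					witness failure.
 *	sysctl debug.witness.trace=1	print a stack trace on a witness
 *					failure as well.
 */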
234 #endif /* KDB */
235
236 #ifdef WITNESS_SKIPSPIN
237 int witness_skipspin = 1;
238 #else
239 int witness_skipspin = 0;
240 #endif
241 TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
242 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
243 &witness_skipspin, 0, "");
244
245 static struct mtx w_mtx;
246 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
247 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
248 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
249 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
250 static struct witness_child_list_entry *w_child_free = NULL;
251 static struct lock_list_entry *w_lock_list_free = NULL;
252
253 static struct witness w_data[WITNESS_COUNT];
254 static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
255 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
256
257 static struct witness_order_list_entry order_lists[] = {
258 /*
259 * sx locks
260 */
261 { "proctree", &lock_class_sx },
262 { "allproc", &lock_class_sx },
263 { NULL, NULL },
264 /*
265 * Various mutexes
266 */
267 { "Giant", &lock_class_mtx_sleep },
268 { "filedesc structure", &lock_class_mtx_sleep },
269 { "pipe mutex", &lock_class_mtx_sleep },
270 { "sigio lock", &lock_class_mtx_sleep },
271 { "process group", &lock_class_mtx_sleep },
272 { "process lock", &lock_class_mtx_sleep },
273 { "session", &lock_class_mtx_sleep },
274 { "uidinfo hash", &lock_class_mtx_sleep },
275 { "uidinfo struct", &lock_class_mtx_sleep },
276 { "allprison", &lock_class_mtx_sleep },
277 { NULL, NULL },
278 /*
279 * Sockets
280 */
281 { "filedesc structure", &lock_class_mtx_sleep },
282 { "accept", &lock_class_mtx_sleep },
283 { "so_snd", &lock_class_mtx_sleep },
284 { "so_rcv", &lock_class_mtx_sleep },
285 { "sellck", &lock_class_mtx_sleep },
286 { NULL, NULL },
287 /*
288 * Routing
289 */
290 { "so_rcv", &lock_class_mtx_sleep },
291 { "radix node head", &lock_class_mtx_sleep },
292 { "rtentry", &lock_class_mtx_sleep },
293 { "ifaddr", &lock_class_mtx_sleep },
294 { NULL, NULL },
295 /*
296 * Multicast - protocol locks before interface locks.
297 */
298 { "in_multi_mtx", &lock_class_mtx_sleep },
299 { "igmp_mtx", &lock_class_mtx_sleep },
300 { "if_addr_mtx", &lock_class_mtx_sleep },
301 { NULL, NULL },
302 /*
303 * UNIX Domain Sockets
304 */
305 { "unp", &lock_class_mtx_sleep },
306 { "so_snd", &lock_class_mtx_sleep },
307 { NULL, NULL },
308 /*
309 * UDP/IP
310 */
311 { "udp", &lock_class_mtx_sleep },
312 { "udpinp", &lock_class_mtx_sleep },
313 { "so_snd", &lock_class_mtx_sleep },
314 { NULL, NULL },
315 /*
316 * TCP/IP
317 */
318 { "tcp", &lock_class_mtx_sleep },
319 { "tcpinp", &lock_class_mtx_sleep },
320 { "so_snd", &lock_class_mtx_sleep },
321 { NULL, NULL },
322 /*
323 * SLIP
324 */
325 { "slip_mtx", &lock_class_mtx_sleep },
326 { "slip sc_mtx", &lock_class_mtx_sleep },
327 { NULL, NULL },
328 /*
329 * netatalk
330 */
331 { "ddp_list_mtx", &lock_class_mtx_sleep },
332 { "ddp_mtx", &lock_class_mtx_sleep },
333 { NULL, NULL },
334 /*
335 * BPF
336 */
337 { "bpf global lock", &lock_class_mtx_sleep },
338 { "bpf interface lock", &lock_class_mtx_sleep },
339 { "bpf cdev lock", &lock_class_mtx_sleep },
340 { NULL, NULL },
341 /*
342 * NFS server
343 */
344 { "nfsd_mtx", &lock_class_mtx_sleep },
345 { "so_snd", &lock_class_mtx_sleep },
346 { NULL, NULL },
347 /*
348 * CDEV
349 */
350 { "system map", &lock_class_mtx_sleep },
351 { "vm page queue mutex", &lock_class_mtx_sleep },
352 { "vnode interlock", &lock_class_mtx_sleep },
353 { "cdev", &lock_class_mtx_sleep },
354 { NULL, NULL },
355 /*
356 * spin locks
357 */
358 #ifdef SMP
359 { "ap boot", &lock_class_mtx_spin },
360 #endif
361 { "rm.mutex_mtx", &lock_class_mtx_spin },
362 { "sio", &lock_class_mtx_spin },
363 #ifdef __i386__
364 { "cy", &lock_class_mtx_spin },
365 #endif
366 { "uart_hwmtx", &lock_class_mtx_spin },
367 { "sabtty", &lock_class_mtx_spin },
368 { "zstty", &lock_class_mtx_spin },
369 { "ng_node", &lock_class_mtx_spin },
370 { "ng_worklist", &lock_class_mtx_spin },
371 { "taskqueue_fast", &lock_class_mtx_spin },
372 { "intr table", &lock_class_mtx_spin },
373 { "ithread table lock", &lock_class_mtx_spin },
374 { "sleepq chain", &lock_class_mtx_spin },
375 { "sched lock", &lock_class_mtx_spin },
376 { "turnstile chain", &lock_class_mtx_spin },
377 { "td_contested", &lock_class_mtx_spin },
378 { "callout", &lock_class_mtx_spin },
379 { "entropy harvest mutex", &lock_class_mtx_spin },
380 /*
381 * leaf locks
382 */
383 { "allpmaps", &lock_class_mtx_spin },
384 { "vm page queue free mutex", &lock_class_mtx_spin },
385 { "icu", &lock_class_mtx_spin },
386 #ifdef SMP
387 { "smp rendezvous", &lock_class_mtx_spin },
388 #if defined(__i386__) || defined(__amd64__)
389 { "tlb", &lock_class_mtx_spin },
390 #endif
391 #ifdef __sparc64__
392 { "ipi", &lock_class_mtx_spin },
393 { "rtc_mtx", &lock_class_mtx_spin },
394 #endif
395 #endif
396 { "clk", &lock_class_mtx_spin },
397 { "mutex profiling lock", &lock_class_mtx_spin },
398 { "kse zombie lock", &lock_class_mtx_spin },
399 { "ALD Queue", &lock_class_mtx_spin },
400 #ifdef __ia64__
401 { "MCA spin lock", &lock_class_mtx_spin },
402 #endif
403 #if defined(__i386__) || defined(__amd64__)
404 { "pcicfg", &lock_class_mtx_spin },
405 { "NDIS thread lock", &lock_class_mtx_spin },
406 #endif
407 { "tw_osl_io_lock", &lock_class_mtx_spin },
408 { "tw_osl_q_lock", &lock_class_mtx_spin },
409 { "tw_cl_io_lock", &lock_class_mtx_spin },
410 { "tw_cl_intr_lock", &lock_class_mtx_spin },
411 { "tw_cl_gen_lock", &lock_class_mtx_spin },
412 { NULL, NULL },
413 { NULL, NULL }
414 };
415
416 #ifdef BLESSING
417 /*
418 * Pairs of locks which have been blessed.  Witness does not complain
419 * about lock order problems between blessed lock pairs.
420 */
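/*
 * An entry is a pair of lock type names; for example (hypothetical, shown
 * for illustration only):
 *
 *	{ "vnode interlock", "cdev" },
 *
 * would tell witness to accept those two locks in either order.
 */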
421 static struct witness_blessed blessed_list[] = {
422 };
423 static int blessed_count =
424 sizeof(blessed_list) / sizeof(struct witness_blessed);
425 #endif
426
427 /*
428 * List of all locks in the system.
429 */
430 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
431
432 static struct mtx all_mtx = {
433 { &lock_class_mtx_sleep, /* mtx_object.lo_class */
434 "All locks list", /* mtx_object.lo_name */
435 "All locks list", /* mtx_object.lo_type */
436 LO_INITIALIZED, /* mtx_object.lo_flags */
437 { NULL, NULL }, /* mtx_object.lo_list */
438 NULL }, /* mtx_object.lo_witness */
439 MTX_UNOWNED, 0 /* mtx_lock, mtx_recurse */
440 };
441
442 /*
443 * This global is set to 0 once it becomes safe to use the witness code.
444 */
445 static int witness_cold = 1;
446
447 /*
448 * This global is set to 1 once the static lock orders have been enrolled
449 * so that a warning can be issued for any spin locks enrolled later.
450 */
451 static int witness_spin_warn = 0;
452
453 /*
454 * Global variables for bookkeeping.
455 */
456 static int lock_cur_cnt;
457 static int lock_max_cnt;
458
459 /*
460 * The WITNESS-enabled diagnostic code.
461 */
462 static void
463 witness_initialize(void *dummy __unused)
464 {
465 struct lock_object *lock;
466 struct witness_order_list_entry *order;
467 struct witness *w, *w1;
468 int i;
469
470 /*
471 * We have to release Giant before initializing its witness
472 * structure so that WITNESS doesn't get confused.
473 */
474 mtx_unlock(&Giant);
475 mtx_assert(&Giant, MA_NOTOWNED);
476
477 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
478 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
479 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
480 MTX_NOWITNESS);
481 for (i = 0; i < WITNESS_COUNT; i++)
482 witness_free(&w_data[i]);
483 for (i = 0; i < WITNESS_CHILDCOUNT; i++)
484 witness_child_free(&w_childdata[i]);
485 for (i = 0; i < LOCK_CHILDCOUNT; i++)
486 witness_lock_list_free(&w_locklistdata[i]);
487
488 /* First add in all the specified order lists. */
489 for (order = order_lists; order->w_name != NULL; order++) {
490 w = enroll(order->w_name, order->w_class);
491 if (w == NULL)
492 continue;
493 w->w_file = "order list";
494 for (order++; order->w_name != NULL; order++) {
495 w1 = enroll(order->w_name, order->w_class);
496 if (w1 == NULL)
497 continue;
498 w1->w_file = "order list";
499 if (!itismychild(w, w1))
500 panic("Not enough memory for static orders!");
501 w = w1;
502 }
503 }
504 witness_spin_warn = 1;
505
506 /* Iterate through all locks and add them to witness. */
507 mtx_lock(&all_mtx);
508 TAILQ_FOREACH(lock, &all_locks, lo_list) {
509 if (lock->lo_flags & LO_WITNESS)
510 lock->lo_witness = enroll(lock->lo_type,
511 lock->lo_class);
512 else
513 lock->lo_witness = NULL;
514 }
515 mtx_unlock(&all_mtx);
516
517 /* Mark the witness code as being ready for use. */
518 witness_cold = 0;
519
520 mtx_lock(&Giant);
521 }
522 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
523
524 static int
525 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
526 {
527 int error, value;
528
529 value = witness_watch;
530 error = sysctl_handle_int(oidp, &value, 0, req);
531 if (error != 0 || req->newptr == NULL)
532 return (error);
533 error = suser(req->td);
534 if (error != 0)
535 return (error);
536 if (value == witness_watch)
537 return (0);
538 if (value != 0)
539 return (EINVAL);
540 witness_watch = 0;
541 return (0);
542 }
543
544 void
545 witness_init(struct lock_object *lock)
546 {
547 struct lock_class *class;
548
549 class = lock->lo_class;
550 if (lock->lo_flags & LO_INITIALIZED)
551 panic("%s: lock (%s) %s is already initialized", __func__,
552 class->lc_name, lock->lo_name);
553 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
554 (class->lc_flags & LC_RECURSABLE) == 0)
555 panic("%s: lock (%s) %s can not be recursable", __func__,
556 class->lc_name, lock->lo_name);
557 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
558 (class->lc_flags & LC_SLEEPABLE) == 0)
559 panic("%s: lock (%s) %s can not be sleepable", __func__,
560 class->lc_name, lock->lo_name);
561 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
562 (class->lc_flags & LC_UPGRADABLE) == 0)
563 panic("%s: lock (%s) %s can not be upgradable", __func__,
564 class->lc_name, lock->lo_name);
565
566 mtx_lock(&all_mtx);
567 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
568 lock->lo_flags |= LO_INITIALIZED;
569 lock_cur_cnt++;
570 if (lock_cur_cnt > lock_max_cnt)
571 lock_max_cnt = lock_cur_cnt;
572 mtx_unlock(&all_mtx);
573 if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
574 (lock->lo_flags & LO_WITNESS) != 0)
575 lock->lo_witness = enroll(lock->lo_type, class);
576 else
577 lock->lo_witness = NULL;
578 }
579
580 void
581 witness_destroy(struct lock_object *lock)
582 {
583 struct witness *w;
584
585 if (witness_cold)
586 panic("lock (%s) %s destroyed while witness_cold",
587 lock->lo_class->lc_name, lock->lo_name);
588 if ((lock->lo_flags & LO_INITIALIZED) == 0)
589 panic("%s: lock (%s) %s is not initialized", __func__,
590 lock->lo_class->lc_name, lock->lo_name);
591
592 /* XXX: need to verify that no one holds the lock */
593 w = lock->lo_witness;
594 if (w != NULL) {
595 mtx_lock_spin(&w_mtx);
596 MPASS(w->w_refcount > 0);
597 w->w_refcount--;
598
599 /*
600 * Lock is already released if we have an allocation failure
601 * and depart() fails.
602 */
603 if (w->w_refcount != 0 || depart(w))
604 mtx_unlock_spin(&w_mtx);
605 }
606
607 mtx_lock(&all_mtx);
608 lock_cur_cnt--;
609 TAILQ_REMOVE(&all_locks, lock, lo_list);
610 lock->lo_flags &= ~LO_INITIALIZED;
611 mtx_unlock(&all_mtx);
612 }
613
614 #ifdef DDB
615 static void
616 witness_display_list(void(*prnt)(const char *fmt, ...),
617 struct witness_list *list)
618 {
619 struct witness *w;
620
621 STAILQ_FOREACH(w, list, w_typelist) {
622 if (w->w_file == NULL || w->w_level > 0)
623 continue;
624 /*
625 * This lock has no ancestors; display its descendants.
626 */
627 witness_displaydescendants(prnt, w, 0);
628 }
629 }
630
631 static void
632 witness_display(void(*prnt)(const char *fmt, ...))
633 {
634 struct witness *w;
635
636 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
637 witness_levelall();
638
639 /* Clear all the displayed flags. */
640 STAILQ_FOREACH(w, &w_all, w_list) {
641 w->w_displayed = 0;
642 }
643
644 /*
645 * First, handle sleep locks which have been acquired at least
646 * once.
647 */
648 prnt("Sleep locks:\n");
649 witness_display_list(prnt, &w_sleep);
650
651 /*
652 * Now do spin locks which have been acquired at least once.
653 */
654 prnt("\nSpin locks:\n");
655 witness_display_list(prnt, &w_spin);
656
657 /*
658 * Finally, any locks which have not been acquired yet.
659 */
660 prnt("\nLocks which were never acquired:\n");
661 STAILQ_FOREACH(w, &w_all, w_list) {
662 if (w->w_file != NULL || w->w_refcount == 0)
663 continue;
664 prnt("%s\n", w->w_name);
665 }
666 }
667 #endif /* DDB */
668
669 /* Trim useless garbage from filenames. */
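/* For example, "../../kern/subr_witness.c" becomes "kern/subr_witness.c". */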
670 static const char *
671 fixup_filename(const char *file)
672 {
673
674 if (file == NULL)
675 return (NULL);
676 while (strncmp(file, "../", 3) == 0)
677 file += 3;
678 return (file);
679 }
680
681 int
682 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
683 {
684
685 if (witness_watch == 0 || panicstr != NULL)
686 return (0);
687
688 /* Require locks that witness knows about. */
689 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
690 lock2->lo_witness == NULL)
691 return (EINVAL);
692
693 MPASS(!mtx_owned(&w_mtx));
694 mtx_lock_spin(&w_mtx);
695
696 /*
697 * If we already have either an explicit or implied lock order that
698 * is the other way around, then return an error.
699 */
700 if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
701 mtx_unlock_spin(&w_mtx);
702 return (EDOOFUS);
703 }
704
705 /* Try to add the new order. */
706 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
707 lock2->lo_type, lock1->lo_type);
708 if (!itismychild(lock1->lo_witness, lock2->lo_witness))
709 return (ENOMEM);
710 mtx_unlock_spin(&w_mtx);
711 return (0);
712 }
713
714 void
715 witness_checkorder(struct lock_object *lock, int flags, const char *file,
716 int line)
717 {
718 struct lock_list_entry **lock_list, *lle;
719 struct lock_instance *lock1, *lock2;
720 struct lock_class *class;
721 struct witness *w, *w1;
722 struct thread *td;
723 int i, j;
724
725 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
726 panicstr != NULL)
727 return;
728
729 /*
730 * Try locks do not block if they fail to acquire the lock, thus
731 * there is no danger of deadlocks or of switching while holding a
732 * spin lock if we acquire a lock via a try operation. This
733 * function shouldn't even be called for try locks, so panic if
734 * that happens.
735 */
736 if (flags & LOP_TRYLOCK)
737 panic("%s should not be called for try lock operations",
738 __func__);
739
740 w = lock->lo_witness;
741 class = lock->lo_class;
742 td = curthread;
743 file = fixup_filename(file);
744
745 if (class->lc_flags & LC_SLEEPLOCK) {
746 /*
747 * Since spin locks include a critical section, this check
748 * implicitly enforces a lock order of all sleep locks before
749 * all spin locks.
750 */
751 if (td->td_critnest != 0 && !kdb_active)
752 panic("blockable sleep lock (%s) %s @ %s:%d",
753 class->lc_name, lock->lo_name, file, line);
754
755 /*
756 * If this is the first lock acquired then just return as
757 * no order checking is needed.
758 */
759 if (td->td_sleeplocks == NULL)
760 return;
761 lock_list = &td->td_sleeplocks;
762 } else {
763 /*
764 * If this is the first lock, just return as no order
765 * checking is needed. We check this in both if clauses
766 * here as unifying the check would require us to use a
767 * critical section to ensure we don't migrate while doing
768 * the check. Note that if this is not the first lock, we
769 * are already in a critical section and are safe for the
770 * rest of the check.
771 */
772 if (PCPU_GET(spinlocks) == NULL)
773 return;
774 lock_list = PCPU_PTR(spinlocks);
775 }
776
777 /*
778 * Check to see if we are recursing on a lock we already own. If
779 * so, make sure that we don't mismatch exclusive and shared lock
780 * acquires.
781 */
782 lock1 = find_instance(*lock_list, lock);
783 if (lock1 != NULL) {
784 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
785 (flags & LOP_EXCLUSIVE) == 0) {
786 printf("shared lock of (%s) %s @ %s:%d\n",
787 class->lc_name, lock->lo_name, file, line);
788 printf("while exclusively locked from %s:%d\n",
789 lock1->li_file, lock1->li_line);
790 panic("share->excl");
791 }
792 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
793 (flags & LOP_EXCLUSIVE) != 0) {
794 printf("exclusive lock of (%s) %s @ %s:%d\n",
795 class->lc_name, lock->lo_name, file, line);
796 printf("while share locked from %s:%d\n",
797 lock1->li_file, lock1->li_line);
798 panic("excl->share");
799 }
800 return;
801 }
802
803 /*
804 * Try locks do not block if they fail to acquire the lock, thus
805 * there is no danger of deadlocks or of switching while holding a
806 * spin lock if we acquire a lock via a try operation.
807 */
808 if (flags & LOP_TRYLOCK)
809 return;
810
811 /*
812 * Check for duplicate locks of the same type. Note that we only
813 * have to check for this on the last lock we just acquired. Any
814 * other cases will be caught as lock order violations.
815 */
816 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
817 w1 = lock1->li_lock->lo_witness;
818 if (w1 == w) {
819 if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
820 (flags & LOP_DUPOK))
821 return;
822 w->w_same_squawked = 1;
823 printf("acquiring duplicate lock of same type: \"%s\"\n",
824 lock->lo_type);
825 printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
826 lock1->li_file, lock1->li_line);
827 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
828 #ifdef KDB
829 goto debugger;
830 #else
831 return;
832 #endif
833 }
834 MPASS(!mtx_owned(&w_mtx));
835 mtx_lock_spin(&w_mtx);
836 /*
837 * If the new lock has a known higher level number, just say okay.
838 */
839 if (witness_watch > 1 && w->w_level > w1->w_level) {
840 mtx_unlock_spin(&w_mtx);
841 return;
842 }
843 /*
844 * If we know that the lock we are acquiring comes after
845 * the lock we most recently acquired in the lock order tree,
846 * then there is no need for any further checks.
847 */
848 if (isitmydescendant(w1, w)) {
849 mtx_unlock_spin(&w_mtx);
850 return;
851 }
852 for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
853 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
854
855 MPASS(j < WITNESS_COUNT);
856 lock1 = &lle->ll_children[i];
857 w1 = lock1->li_lock->lo_witness;
858
859 /*
860 * If this lock doesn't undergo witness checking,
861 * then skip it.
862 */
863 if (w1 == NULL) {
864 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
865 ("lock missing witness structure"));
866 continue;
867 }
868 /*
869 * If we are locking Giant and this is a sleepable
870 * lock, then skip it.
871 */
872 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
873 lock == &Giant.mtx_object)
874 continue;
875 /*
876 * If we are locking a sleepable lock and this lock
877 * is Giant, then skip it.
878 */
879 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
880 lock1->li_lock == &Giant.mtx_object)
881 continue;
882 /*
883 * If we are locking a sleepable lock and this lock
884 * isn't sleepable, we want to treat it as a lock
885 * order violation to enforce a general lock order of
886 * sleepable locks before non-sleepable locks.
887 */
888 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
889 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
890 goto reversal;
891 /*
892 * Check the lock order hierarchy for a reversal.
893 */
894 if (!isitmydescendant(w, w1))
895 continue;
896 reversal:
897 /*
898 * We have a lock order violation, check to see if it
899 * is allowed or has already been yelled about.
900 */
901 mtx_unlock_spin(&w_mtx);
902 #ifdef BLESSING
903 /*
904 * If the lock order is blessed, just bail. We don't
905 * look for other lock order violations though, which
906 * may be a bug.
907 */
908 if (blessed(w, w1))
909 return;
910 #endif
911 if (lock1->li_lock == &Giant.mtx_object) {
912 if (w1->w_Giant_squawked)
913 return;
914 else
915 w1->w_Giant_squawked = 1;
916 } else {
917 if (w1->w_other_squawked)
918 return;
919 else
920 w1->w_other_squawked = 1;
921 }
922 /*
923 * Ok, yell about it.
924 */
925 printf("lock order reversal\n");
926 /*
927 * Try to locate an earlier lock with
928 * witness w in our list.
929 */
930 do {
931 lock2 = &lle->ll_children[i];
932 MPASS(lock2->li_lock != NULL);
933 if (lock2->li_lock->lo_witness == w)
934 break;
935 if (i == 0 && lle->ll_next != NULL) {
936 lle = lle->ll_next;
937 i = lle->ll_count - 1;
938 MPASS(i >= 0 && i < LOCK_NCHILDREN);
939 } else
940 i--;
941 } while (i >= 0);
942 if (i < 0) {
943 printf(" 1st %p %s (%s) @ %s:%d\n",
944 lock1->li_lock, lock1->li_lock->lo_name,
945 lock1->li_lock->lo_type, lock1->li_file,
946 lock1->li_line);
947 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
948 lock->lo_name, lock->lo_type, file, line);
949 } else {
950 printf(" 1st %p %s (%s) @ %s:%d\n",
951 lock2->li_lock, lock2->li_lock->lo_name,
952 lock2->li_lock->lo_type, lock2->li_file,
953 lock2->li_line);
954 printf(" 2nd %p %s (%s) @ %s:%d\n",
955 lock1->li_lock, lock1->li_lock->lo_name,
956 lock1->li_lock->lo_type, lock1->li_file,
957 lock1->li_line);
958 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
959 lock->lo_name, lock->lo_type, file, line);
960 }
961 #ifdef KDB
962 goto debugger;
963 #else
964 return;
965 #endif
966 }
967 }
968 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
969 /*
970 * If requested, build a new lock order. However, don't build a new
971 * relationship between a sleepable lock and Giant if it is in the
972 * wrong direction. The correct lock order is that sleepable locks
973 * always come before Giant.
974 */
975 if (flags & LOP_NEWORDER &&
976 !(lock1->li_lock == &Giant.mtx_object &&
977 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
978 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
979 lock->lo_type, lock1->li_lock->lo_type);
980 if (!itismychild(lock1->li_lock->lo_witness, w))
981 /* Witness is dead. */
982 return;
983 }
984 mtx_unlock_spin(&w_mtx);
985 return;
986
987 #ifdef KDB
988 debugger:
989 if (witness_trace)
990 kdb_backtrace();
991 if (witness_kdb)
992 kdb_enter(__func__);
993 #endif
994 }
995
996 void
997 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
998 {
999 struct lock_list_entry **lock_list, *lle;
1000 struct lock_instance *instance;
1001 struct witness *w;
1002 struct thread *td;
1003
1004 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1005 panicstr != NULL)
1006 return;
1007 w = lock->lo_witness;
1008 td = curthread;
1009 file = fixup_filename(file);
1010
1011 /* Determine lock list for this lock. */
1012 if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
1013 lock_list = &td->td_sleeplocks;
1014 else
1015 lock_list = PCPU_PTR(spinlocks);
1016
1017 /* Check to see if we are recursing on a lock we already own. */
1018 instance = find_instance(*lock_list, lock);
1019 if (instance != NULL) {
1020 instance->li_flags++;
1021 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1022 td->td_proc->p_pid, lock->lo_name,
1023 instance->li_flags & LI_RECURSEMASK);
1024 instance->li_file = file;
1025 instance->li_line = line;
1026 return;
1027 }
1028
1029 /* Update per-witness last file and line acquire. */
1030 w->w_file = file;
1031 w->w_line = line;
1032
1033 /* Find the next open lock instance in the list and fill it. */
1034 lle = *lock_list;
1035 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1036 lle = witness_lock_list_get();
1037 if (lle == NULL)
1038 return;
1039 lle->ll_next = *lock_list;
1040 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1041 td->td_proc->p_pid, lle);
1042 *lock_list = lle;
1043 }
1044 instance = &lle->ll_children[lle->ll_count++];
1045 instance->li_lock = lock;
1046 instance->li_line = line;
1047 instance->li_file = file;
1048 if ((flags & LOP_EXCLUSIVE) != 0)
1049 instance->li_flags = LI_EXCLUSIVE;
1050 else
1051 instance->li_flags = 0;
1052 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1053 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1054 }
1055
1056 void
1057 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1058 {
1059 struct lock_instance *instance;
1060 struct lock_class *class;
1061
1062 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1063 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1064 return;
1065 class = lock->lo_class;
1066 file = fixup_filename(file);
1067 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1068 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1069 class->lc_name, lock->lo_name, file, line);
1070 if ((flags & LOP_TRYLOCK) == 0)
1071 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
1072 lock->lo_name, file, line);
1073 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1074 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1075 class->lc_name, lock->lo_name, file, line);
1076 instance = find_instance(curthread->td_sleeplocks, lock);
1077 if (instance == NULL)
1078 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1079 class->lc_name, lock->lo_name, file, line);
1080 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1081 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1082 class->lc_name, lock->lo_name, file, line);
1083 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1084 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1085 class->lc_name, lock->lo_name,
1086 instance->li_flags & LI_RECURSEMASK, file, line);
1087 instance->li_flags |= LI_EXCLUSIVE;
1088 }
1089
1090 void
1091 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1092 int line)
1093 {
1094 struct lock_instance *instance;
1095 struct lock_class *class;
1096
1097 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1098 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1099 return;
1100 class = lock->lo_class;
1101 file = fixup_filename(file);
1102 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1103 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1104 class->lc_name, lock->lo_name, file, line);
1105 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1106 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1107 class->lc_name, lock->lo_name, file, line);
1108 instance = find_instance(curthread->td_sleeplocks, lock);
1109 if (instance == NULL)
1110 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1111 class->lc_name, lock->lo_name, file, line);
1112 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1113 panic("downgrade of shared lock (%s) %s @ %s:%d",
1114 class->lc_name, lock->lo_name, file, line);
1115 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1116 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1117 class->lc_name, lock->lo_name,
1118 instance->li_flags & LI_RECURSEMASK, file, line);
1119 instance->li_flags &= ~LI_EXCLUSIVE;
1120 }
1121
1122 void
1123 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1124 {
1125 struct lock_list_entry **lock_list, *lle;
1126 struct lock_instance *instance;
1127 struct lock_class *class;
1128 struct thread *td;
1129 register_t s;
1130 int i, j;
1131
1132 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1133 panicstr != NULL)
1134 return;
1135 td = curthread;
1136 class = lock->lo_class;
1137 file = fixup_filename(file);
1138
1139 /* Find lock instance associated with this lock. */
1140 if (class->lc_flags & LC_SLEEPLOCK)
1141 lock_list = &td->td_sleeplocks;
1142 else
1143 lock_list = PCPU_PTR(spinlocks);
1144 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1145 for (i = 0; i < (*lock_list)->ll_count; i++) {
1146 instance = &(*lock_list)->ll_children[i];
1147 if (instance->li_lock == lock)
1148 goto found;
1149 }
1150 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1151 file, line);
1152 found:
1153
1154 /* First, check for shared/exclusive mismatches. */
1155 if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
1156 (flags & LOP_EXCLUSIVE) == 0) {
1157 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1158 lock->lo_name, file, line);
1159 printf("while exclusively locked from %s:%d\n",
1160 instance->li_file, instance->li_line);
1161 panic("excl->ushare");
1162 }
1163 if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
1164 (flags & LOP_EXCLUSIVE) != 0) {
1165 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1166 lock->lo_name, file, line);
1167 printf("while share locked from %s:%d\n", instance->li_file,
1168 instance->li_line);
1169 panic("share->uexcl");
1170 }
1171
1172 /* If we are recursed, unrecurse. */
1173 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1174 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1175 td->td_proc->p_pid, instance->li_lock->lo_name,
1176 instance->li_flags);
1177 instance->li_flags--;
1178 return;
1179 }
1180
1181 /* Otherwise, remove this item from the list. */
1182 s = intr_disable();
1183 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1184 td->td_proc->p_pid, instance->li_lock->lo_name,
1185 (*lock_list)->ll_count - 1);
1186 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1187 (*lock_list)->ll_children[j] =
1188 (*lock_list)->ll_children[j + 1];
1189 (*lock_list)->ll_count--;
1190 intr_restore(s);
1191
1192 /* If this lock list entry is now empty, free it. */
1193 if ((*lock_list)->ll_count == 0) {
1194 lle = *lock_list;
1195 *lock_list = lle->ll_next;
1196 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1197 td->td_proc->p_pid, lle);
1198 witness_lock_list_free(lle);
1199 }
1200 }
1201
1202 /*
1203 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1204 * exempt Giant and sleepable locks from the checks as well. If any
1205 * non-exempt locks are held, then a supplied message is printed to the
1206 * console along with a list of the offending locks. If indicated in the
1207 * flags then a failure results in a panic as well.
1208 */
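/*
 * Typical call (illustrative; "wmesg" stands in for a hypothetical message
 * string):
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 *
 * which complains about any held locks other than Giant and sleepable
 * locks before going to sleep.
 */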
1209 int
1210 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1211 {
1212 struct lock_list_entry *lle;
1213 struct lock_instance *lock1;
1214 struct thread *td;
1215 va_list ap;
1216 int i, n;
1217
1218 if (witness_cold || witness_watch == 0 || panicstr != NULL)
1219 return (0);
1220 n = 0;
1221 td = curthread;
1222 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1223 for (i = lle->ll_count - 1; i >= 0; i--) {
1224 lock1 = &lle->ll_children[i];
1225 if (lock1->li_lock == lock)
1226 continue;
1227 if (flags & WARN_GIANTOK &&
1228 lock1->li_lock == &Giant.mtx_object)
1229 continue;
1230 if (flags & WARN_SLEEPOK &&
1231 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1232 continue;
1233 if (n == 0) {
1234 va_start(ap, fmt);
1235 vprintf(fmt, ap);
1236 va_end(ap);
1237 printf(" with the following");
1238 if (flags & WARN_SLEEPOK)
1239 printf(" non-sleepable");
1240 printf(" locks held:\n");
1241 }
1242 n++;
1243 witness_list_lock(lock1);
1244 }
1245 if (PCPU_GET(spinlocks) != NULL) {
1246 /*
1247 * Since we already hold a spin lock, preemption is
1248 * already blocked.
1249 */
1250 if (n == 0) {
1251 va_start(ap, fmt);
1252 vprintf(fmt, ap);
1253 va_end(ap);
1254 printf(" with the following");
1255 if (flags & WARN_SLEEPOK)
1256 printf(" non-sleepable");
1257 printf(" locks held:\n");
1258 }
1259 n += witness_list_locks(PCPU_PTR(spinlocks));
1260 }
1261 if (flags & WARN_PANIC && n)
1262 panic("witness_warn");
1263 #ifdef KDB
1264 else if (witness_kdb && n)
1265 kdb_enter(__func__);
1266 else if (witness_trace && n)
1267 kdb_backtrace();
1268 #endif
1269 return (n);
1270 }
1271
1272 const char *
1273 witness_file(struct lock_object *lock)
1274 {
1275 struct witness *w;
1276
1277 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1278 return ("?");
1279 w = lock->lo_witness;
1280 return (w->w_file);
1281 }
1282
1283 int
1284 witness_line(struct lock_object *lock)
1285 {
1286 struct witness *w;
1287
1288 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1289 return (0);
1290 w = lock->lo_witness;
1291 return (w->w_line);
1292 }
1293
1294 static struct witness *
1295 enroll(const char *description, struct lock_class *lock_class)
1296 {
1297 struct witness *w;
1298
1299 if (witness_watch == 0 || panicstr != NULL)
1300 return (NULL);
1301 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1302 return (NULL);
1303 mtx_lock_spin(&w_mtx);
1304 STAILQ_FOREACH(w, &w_all, w_list) {
1305 if (w->w_name == description || (w->w_refcount > 0 &&
1306 strcmp(description, w->w_name) == 0)) {
1307 w->w_refcount++;
1308 mtx_unlock_spin(&w_mtx);
1309 if (lock_class != w->w_class)
1310 panic(
1311 "lock (%s) %s does not match earlier (%s) lock",
1312 description, lock_class->lc_name,
1313 w->w_class->lc_name);
1314 return (w);
1315 }
1316 }
1317 /*
1318 * We issue a warning for any spin locks not defined in the static
1319 * order list as a way to discourage their use (folks should really
1320 * be using non-spin mutexes most of the time). However, several
1321 * 3rd-party device drivers use spin locks because that is all they
1322 * have available on Windows and Linux and they think that normal
1323 * mutexes are insufficient.
1324 */
1325 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
1326 printf("WITNESS: spin lock %s not in order list\n", description);
1327 if ((w = witness_get()) == NULL)
1328 return (NULL);
1329 w->w_name = description;
1330 w->w_class = lock_class;
1331 w->w_refcount = 1;
1332 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1333 if (lock_class->lc_flags & LC_SPINLOCK)
1334 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1335 else if (lock_class->lc_flags & LC_SLEEPLOCK)
1336 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1337 else {
1338 mtx_unlock_spin(&w_mtx);
1339 panic("lock class %s is not sleep or spin",
1340 lock_class->lc_name);
1341 }
1342 mtx_unlock_spin(&w_mtx);
1343 return (w);
1344 }
1345
1346 /* Don't let the door bang you on the way out... */
1347 static int
1348 depart(struct witness *w)
1349 {
1350 struct witness_child_list_entry *wcl, *nwcl;
1351 struct witness_list *list;
1352 struct witness *parent;
1353
1354 MPASS(w->w_refcount == 0);
1355 if (w->w_class->lc_flags & LC_SLEEPLOCK)
1356 list = &w_sleep;
1357 else
1358 list = &w_spin;
1359 /*
1360 * First, we run through the entire tree looking for any
1361 * witnesses that the outgoing witness is a child of. For
1362 * each parent that we find, we reparent all the direct
1363 * children of the outgoing witness to its parent.
1364 */
1365 STAILQ_FOREACH(parent, list, w_typelist) {
1366 if (!isitmychild(parent, w))
1367 continue;
1368 removechild(parent, w);
1369 if (!reparentchildren(parent, w))
1370 return (0);
1371 }
1372
1373 /*
1374 * Now we go through and free up the child list of the
1375 * outgoing witness.
1376 */
1377 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1378 nwcl = wcl->wcl_next;
1379 witness_child_free(wcl);
1380 }
1381
1382 /*
1383 * Detach from various lists and free.
1384 */
1385 STAILQ_REMOVE(list, w, witness, w_typelist);
1386 STAILQ_REMOVE(&w_all, w, witness, w_list);
1387 witness_free(w);
1388
1389 /* Finally, fixup the tree. */
1390 return (rebalancetree(list));
1391 }
1392
1393 /*
1394 * Prune an entire lock order tree. We look for cases where a lock
1395 * is now both a descendant and a direct child of a given lock. In
1396 * that case, we want to remove the direct child link from the tree.
1397 *
1398 * Returns false if insertchild() fails.
1399 */
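/*
 * For example (illustrative): if the tree contains both A -> B -> C and a
 * direct A -> C edge, the redundant direct A -> C edge is removed, leaving
 * only A -> B -> C.
 */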
1400 static int
1401 rebalancetree(struct witness_list *list)
1402 {
1403 struct witness *child, *parent;
1404
1405 STAILQ_FOREACH(child, list, w_typelist) {
1406 STAILQ_FOREACH(parent, list, w_typelist) {
1407 if (!isitmychild(parent, child))
1408 continue;
1409 removechild(parent, child);
1410 if (isitmydescendant(parent, child))
1411 continue;
1412 if (!insertchild(parent, child))
1413 return (0);
1414 }
1415 }
1416 witness_levelall();
1417 return (1);
1418 }
1419
1420 /*
1421 * Add "child" as a direct child of "parent". Returns false if
1422 * we fail due to out of memory.
1423 */
1424 static int
1425 insertchild(struct witness *parent, struct witness *child)
1426 {
1427 struct witness_child_list_entry **wcl;
1428
1429 MPASS(child != NULL && parent != NULL);
1430
1431 /*
1432 * Insert "child" after "parent"
1433 */
1434 wcl = &parent->w_children;
1435 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1436 wcl = &(*wcl)->wcl_next;
1437 if (*wcl == NULL) {
1438 *wcl = witness_child_get();
1439 if (*wcl == NULL)
1440 return (0);
1441 }
1442 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1443
1444 return (1);
1445 }
1446
1447 /*
1448 * Make all the direct descendants of oldparent be direct descendants
1449 * of newparent.
1450 */
1451 static int
1452 reparentchildren(struct witness *newparent, struct witness *oldparent)
1453 {
1454 struct witness_child_list_entry *wcl;
1455 int i;
1456
1457 /* Avoid making a witness a child of itself. */
1458 MPASS(!isitmychild(oldparent, newparent));
1459
1460 for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1461 for (i = 0; i < wcl->wcl_count; i++)
1462 if (!insertchild(newparent, wcl->wcl_children[i]))
1463 return (0);
1464 return (1);
1465 }
1466
1467 static int
1468 itismychild(struct witness *parent, struct witness *child)
1469 {
1470 struct witness_list *list;
1471
1472 MPASS(child != NULL && parent != NULL);
1473 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1474 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1475 panic(
1476 "%s: parent (%s) and child (%s) are not the same lock type",
1477 __func__, parent->w_class->lc_name,
1478 child->w_class->lc_name);
1479
1480 if (!insertchild(parent, child))
1481 return (0);
1482
1483 if (parent->w_class->lc_flags & LC_SLEEPLOCK)
1484 list = &w_sleep;
1485 else
1486 list = &w_spin;
1487 return (rebalancetree(list));
1488 }
1489
1490 static void
1491 removechild(struct witness *parent, struct witness *child)
1492 {
1493 struct witness_child_list_entry **wcl, *wcl1;
1494 int i;
1495
1496 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1497 for (i = 0; i < (*wcl)->wcl_count; i++)
1498 if ((*wcl)->wcl_children[i] == child)
1499 goto found;
1500 return;
1501 found:
1502 (*wcl)->wcl_count--;
1503 if ((*wcl)->wcl_count > i)
1504 (*wcl)->wcl_children[i] =
1505 (*wcl)->wcl_children[(*wcl)->wcl_count];
1506 MPASS((*wcl)->wcl_children[i] != NULL);
1507 if ((*wcl)->wcl_count != 0)
1508 return;
1509 wcl1 = *wcl;
1510 *wcl = wcl1->wcl_next;
1511 witness_child_free(wcl1);
1512 }
1513
1514 static int
1515 isitmychild(struct witness *parent, struct witness *child)
1516 {
1517 struct witness_child_list_entry *wcl;
1518 int i;
1519
1520 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1521 for (i = 0; i < wcl->wcl_count; i++) {
1522 if (wcl->wcl_children[i] == child)
1523 return (1);
1524 }
1525 }
1526 return (0);
1527 }
1528
1529 static int
1530 isitmydescendant(struct witness *parent, struct witness *child)
1531 {
1532 struct witness_child_list_entry *wcl;
1533 int i, j;
1534
1535 if (isitmychild(parent, child))
1536 return (1);
1537 j = 0;
1538 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1539 MPASS(j < 1000);
1540 for (i = 0; i < wcl->wcl_count; i++) {
1541 if (isitmydescendant(wcl->wcl_children[i], child))
1542 return (1);
1543 }
1544 j++;
1545 }
1546 return (0);
1547 }
1548
1549 static void
1550 witness_levelall(void)
1551 {
1552 struct witness_list *list;
1553 struct witness *w, *w1;
1554
1555 /*
1556 * First clear all levels.
1557 */
1558 STAILQ_FOREACH(w, &w_all, w_list) {
1559 w->w_level = 0;
1560 }
1561
1562 /*
1563 * Look for locks with no parent and level all their descendants.
1564 */
1565 STAILQ_FOREACH(w, &w_all, w_list) {
1566 /*
1567 * This is just an optimization, technically we could get
1568 * away with just walking the w_all list each time.
1569 */
1570 if (w->w_class->lc_flags & LC_SLEEPLOCK)
1571 list = &w_sleep;
1572 else
1573 list = &w_spin;
1574 STAILQ_FOREACH(w1, list, w_typelist) {
1575 if (isitmychild(w1, w))
1576 goto skip;
1577 }
1578 witness_leveldescendents(w, 0);
1579 skip:
1580 ; /* silence GCC 3.x */
1581 }
1582 }
1583
1584 static void
1585 witness_leveldescendents(struct witness *parent, int level)
1586 {
1587 struct witness_child_list_entry *wcl;
1588 int i;
1589
1590 if (parent->w_level < level)
1591 parent->w_level = level;
1592 level++;
1593 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1594 for (i = 0; i < wcl->wcl_count; i++)
1595 witness_leveldescendents(wcl->wcl_children[i], level);
1596 }
1597
1598 static void
1599 witness_displaydescendants(void(*prnt)(const char *fmt, ...),
1600 struct witness *parent, int indent)
1601 {
1602 struct witness_child_list_entry *wcl;
1603 int i, level;
1604
1605 level = parent->w_level;
1606 prnt("%-2d", level);
1607 for (i = 0; i < indent; i++)
1608 prnt(" ");
1609 if (parent->w_refcount > 0)
1610 prnt("%s", parent->w_name);
1611 else
1612 prnt("(dead)");
1613 if (parent->w_displayed) {
1614 prnt(" -- (already displayed)\n");
1615 return;
1616 }
1617 parent->w_displayed = 1;
1618 if (parent->w_refcount > 0) {
1619 if (parent->w_file != NULL)
1620 prnt(" -- last acquired @ %s:%d", parent->w_file,
1621 parent->w_line);
1622 }
1623 prnt("\n");
1624 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1625 for (i = 0; i < wcl->wcl_count; i++)
1626 witness_displaydescendants(prnt,
1627 wcl->wcl_children[i], indent + 1);
1628 }
1629
1630 #ifdef BLESSING
1631 static int
1632 blessed(struct witness *w1, struct witness *w2)
1633 {
1634 int i;
1635 struct witness_blessed *b;
1636
1637 for (i = 0; i < blessed_count; i++) {
1638 b = &blessed_list[i];
1639 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1640 if (strcmp(w2->w_name, b->b_lock2) == 0)
1641 return (1);
1642 continue;
1643 }
1644 if (strcmp(w1->w_name, b->b_lock2) == 0)
1645 if (strcmp(w2->w_name, b->b_lock1) == 0)
1646 return (1);
1647 }
1648 return (0);
1649 }
1650 #endif
1651
1652 static struct witness *
1653 witness_get(void)
1654 {
1655 struct witness *w;
1656
1657 if (witness_watch == 0) {
1658 mtx_unlock_spin(&w_mtx);
1659 return (NULL);
1660 }
1661 if (STAILQ_EMPTY(&w_free)) {
1662 witness_watch = 0;
1663 mtx_unlock_spin(&w_mtx);
1664 printf("%s: witness exhausted\n", __func__);
1665 return (NULL);
1666 }
1667 w = STAILQ_FIRST(&w_free);
1668 STAILQ_REMOVE_HEAD(&w_free, w_list);
1669 bzero(w, sizeof(*w));
1670 return (w);
1671 }
1672
1673 static void
1674 witness_free(struct witness *w)
1675 {
1676
1677 STAILQ_INSERT_HEAD(&w_free, w, w_list);
1678 }
1679
1680 static struct witness_child_list_entry *
1681 witness_child_get(void)
1682 {
1683 struct witness_child_list_entry *wcl;
1684
1685 if (witness_watch == 0) {
1686 mtx_unlock_spin(&w_mtx);
1687 return (NULL);
1688 }
1689 wcl = w_child_free;
1690 if (wcl == NULL) {
1691 witness_watch = 0;
1692 mtx_unlock_spin(&w_mtx);
1693 printf("%s: witness exhausted\n", __func__);
1694 return (NULL);
1695 }
1696 w_child_free = wcl->wcl_next;
1697 bzero(wcl, sizeof(*wcl));
1698 return (wcl);
1699 }
1700
1701 static void
1702 witness_child_free(struct witness_child_list_entry *wcl)
1703 {
1704
1705 wcl->wcl_next = w_child_free;
1706 w_child_free = wcl;
1707 }
1708
1709 static struct lock_list_entry *
1710 witness_lock_list_get(void)
1711 {
1712 struct lock_list_entry *lle;
1713
1714 if (witness_watch == 0)
1715 return (NULL);
1716 mtx_lock_spin(&w_mtx);
1717 lle = w_lock_list_free;
1718 if (lle == NULL) {
1719 witness_watch = 0;
1720 mtx_unlock_spin(&w_mtx);
1721 printf("%s: witness exhausted\n", __func__);
1722 return (NULL);
1723 }
1724 w_lock_list_free = lle->ll_next;
1725 mtx_unlock_spin(&w_mtx);
1726 bzero(lle, sizeof(*lle));
1727 return (lle);
1728 }
1729
1730 static void
1731 witness_lock_list_free(struct lock_list_entry *lle)
1732 {
1733
1734 mtx_lock_spin(&w_mtx);
1735 lle->ll_next = w_lock_list_free;
1736 w_lock_list_free = lle;
1737 mtx_unlock_spin(&w_mtx);
1738 }
1739
1740 static struct lock_instance *
1741 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1742 {
1743 struct lock_list_entry *lle;
1744 struct lock_instance *instance;
1745 int i;
1746
1747 for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1748 for (i = lle->ll_count - 1; i >= 0; i--) {
1749 instance = &lle->ll_children[i];
1750 if (instance->li_lock == lock)
1751 return (instance);
1752 }
1753 return (NULL);
1754 }
1755
1756 static void
1757 witness_list_lock(struct lock_instance *instance)
1758 {
1759 struct lock_object *lock;
1760
1761 lock = instance->li_lock;
1762 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1763 "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
1764 if (lock->lo_type != lock->lo_name)
1765 printf(" (%s)", lock->lo_type);
1766 printf(" r = %d (%p) locked @ %s:%d\n",
1767 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1768 instance->li_line);
1769 }
1770
1771 #ifdef DDB
1772 static int
1773 witness_thread_has_locks(struct thread *td)
1774 {
1775
1776 return (td->td_sleeplocks != NULL);
1777 }
1778
1779 static int
1780 witness_proc_has_locks(struct proc *p)
1781 {
1782 struct thread *td;
1783
1784 FOREACH_THREAD_IN_PROC(p, td) {
1785 if (witness_thread_has_locks(td))
1786 return (1);
1787 }
1788 return (0);
1789 }
1790 #endif
1791
1792 int
1793 witness_list_locks(struct lock_list_entry **lock_list)
1794 {
1795 struct lock_list_entry *lle;
1796 int i, nheld;
1797
1798 nheld = 0;
1799 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1800 for (i = lle->ll_count - 1; i >= 0; i--) {
1801 witness_list_lock(&lle->ll_children[i]);
1802 nheld++;
1803 }
1804 return (nheld);
1805 }
1806
1807 /*
1808 * This is a bit risky at best. We call this function when we have timed
1809 * out acquiring a spin lock, and we assume that the other CPU is stuck
1810 * with this lock held. So, we go groveling around in the other CPU's
1811 * per-cpu data to try to find the lock instance for this spin lock to
1812 * see when it was last acquired.
1813 */
1814 void
1815 witness_display_spinlock(struct lock_object *lock, struct thread *owner)
1816 {
1817 struct lock_instance *instance;
1818 struct pcpu *pc;
1819
1820 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
1821 return;
1822 pc = pcpu_find(owner->td_oncpu);
1823 instance = find_instance(pc->pc_spinlocks, lock);
1824 if (instance != NULL)
1825 witness_list_lock(instance);
1826 }
1827
1828 void
1829 witness_save(struct lock_object *lock, const char **filep, int *linep)
1830 {
1831 struct lock_instance *instance;
1832
1833 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1834 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1835 return;
1836 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1837 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1838 lock->lo_class->lc_name, lock->lo_name);
1839 instance = find_instance(curthread->td_sleeplocks, lock);
1840 if (instance == NULL)
1841 panic("%s: lock (%s) %s not locked", __func__,
1842 lock->lo_class->lc_name, lock->lo_name);
1843 *filep = instance->li_file;
1844 *linep = instance->li_line;
1845 }
1846
1847 void
1848 witness_restore(struct lock_object *lock, const char *file, int line)
1849 {
1850 struct lock_instance *instance;
1851
1852 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1853 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1854 return;
1855 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1856 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1857 lock->lo_class->lc_name, lock->lo_name);
1858 instance = find_instance(curthread->td_sleeplocks, lock);
1859 if (instance == NULL)
1860 panic("%s: lock (%s) %s not locked", __func__,
1861 lock->lo_class->lc_name, lock->lo_name);
1862 lock->lo_witness->w_file = file;
1863 lock->lo_witness->w_line = line;
1864 instance->li_file = file;
1865 instance->li_line = line;
1866 }
1867
1868 void
1869 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1870 {
1871 #ifdef INVARIANT_SUPPORT
1872 struct lock_instance *instance;
1873
1874 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1875 return;
1876 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
1877 instance = find_instance(curthread->td_sleeplocks, lock);
1878 else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
1879 instance = find_instance(PCPU_GET(spinlocks), lock);
1880 else {
1881 panic("Lock (%s) %s is not sleep or spin!",
1882 lock->lo_class->lc_name, lock->lo_name);
1883 }
1884 file = fixup_filename(file);
1885 switch (flags) {
1886 case LA_UNLOCKED:
1887 if (instance != NULL)
1888 panic("Lock (%s) %s locked @ %s:%d.",
1889 lock->lo_class->lc_name, lock->lo_name, file, line);
1890 break;
1891 case LA_LOCKED:
1892 case LA_LOCKED | LA_RECURSED:
1893 case LA_LOCKED | LA_NOTRECURSED:
1894 case LA_SLOCKED:
1895 case LA_SLOCKED | LA_RECURSED:
1896 case LA_SLOCKED | LA_NOTRECURSED:
1897 case LA_XLOCKED:
1898 case LA_XLOCKED | LA_RECURSED:
1899 case LA_XLOCKED | LA_NOTRECURSED:
1900 if (instance == NULL) {
1901 panic("Lock (%s) %s not locked @ %s:%d.",
1902 lock->lo_class->lc_name, lock->lo_name, file, line);
1903 break;
1904 }
1905 if ((flags & LA_XLOCKED) != 0 &&
1906 (instance->li_flags & LI_EXCLUSIVE) == 0)
1907 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1908 lock->lo_class->lc_name, lock->lo_name, file, line);
1909 if ((flags & LA_SLOCKED) != 0 &&
1910 (instance->li_flags & LI_EXCLUSIVE) != 0)
1911 panic("Lock (%s) %s exclusively locked @ %s:%d.",
1912 lock->lo_class->lc_name, lock->lo_name, file, line);
1913 if ((flags & LA_RECURSED) != 0 &&
1914 (instance->li_flags & LI_RECURSEMASK) == 0)
1915 panic("Lock (%s) %s not recursed @ %s:%d.",
1916 lock->lo_class->lc_name, lock->lo_name, file, line);
1917 if ((flags & LA_NOTRECURSED) != 0 &&
1918 (instance->li_flags & LI_RECURSEMASK) != 0)
1919 panic("Lock (%s) %s recursed @ %s:%d.",
1920 lock->lo_class->lc_name, lock->lo_name, file, line);
1921 break;
1922 default:
1923 panic("Invalid lock assertion at %s:%d.", file, line);
1924
1925 }
1926 #endif /* INVARIANT_SUPPORT */
1927 }
1928
1929 #ifdef DDB
1930 static void
1931 witness_list(struct thread *td)
1932 {
1933
1934 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1935 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
1936
1937 if (witness_watch == 0)
1938 return;
1939
1940 witness_list_locks(&td->td_sleeplocks);
1941
1942 /*
1943 * We only handle spinlocks if td == curthread. This is somewhat broken
1944 * if td is currently executing on some other CPU and holds spin locks
1945 * as we won't display those locks.  If we had an MI way of getting
1946 * the per-cpu data for a given cpu then we could use
1947 * td->td_oncpu to get the list of spinlocks for this thread
1948 * and "fix" this.
1949 *
1950 * That still wouldn't really fix this unless we locked sched_lock
1951 * or stopped the other CPU to make sure it wasn't changing the list
1952 * out from under us. It is probably best to just not try to handle
1953 * threads on other CPUs for now.
1954 */
1955 if (td == curthread && PCPU_GET(spinlocks) != NULL)
1956 witness_list_locks(PCPU_PTR(spinlocks));
1957 }
1958
1959 DB_SHOW_COMMAND(locks, db_witness_list)
1960 {
1961 struct thread *td;
1962 pid_t pid;
1963 struct proc *p;
1964
1965 if (have_addr) {
1966 pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1967 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1968 ((addr >> 16) % 16) * 10000;
1969 /* sx_slock(&allproc_lock); */
1970 FOREACH_PROC_IN_SYSTEM(p) {
1971 if (p->p_pid == pid)
1972 break;
1973 }
1974 /* sx_sunlock(&allproc_lock); */
1975 if (p == NULL) {
1976 db_printf("pid %d not found\n", pid);
1977 return;
1978 }
1979 FOREACH_THREAD_IN_PROC(p, td) {
1980 witness_list(td);
1981 }
1982 } else {
1983 td = curthread;
1984 witness_list(td);
1985 }
1986 }
1987
1988 DB_SHOW_COMMAND(alllocks, db_witness_list_all)
1989 {
1990 struct thread *td;
1991 struct proc *p;
1992
1993 /*
1994 * It would be nice to list only threads and processes that actually
1995 * held sleep locks, but that information is currently not exported
1996 * by WITNESS.
1997 */
1998 FOREACH_PROC_IN_SYSTEM(p) {
1999 if (!witness_proc_has_locks(p))
2000 continue;
2001 FOREACH_THREAD_IN_PROC(p, td) {
2002 if (!witness_thread_has_locks(td))
2003 continue;
2004 printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2005 p->p_comm, td, td->td_tid);
2006 witness_list(td);
2007 }
2008 }
2009 }
2010
2011 DB_SHOW_COMMAND(witness, db_witness_display)
2012 {
2013
2014 witness_display(db_printf);
2015 }
2016 #endif