/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *      religious faith or conviction <the heroic witness to divine
 *      life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
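
/*
 * Illustrative sketch only (hypothetical lock names, not part of this
 * file's logic): an acquisition sequence that is legal under all three
 * rules above.
 *
 *	struct sx data_sx;		(sleepable)
 *	struct mtx data_mtx;		(non-sleepable)
 *
 *	sx_xlock(&data_sx);	sleepable lock first
 *	mtx_lock(&Giant);	Giant after a sleepable lock (rule 3)
 *	mtx_lock(&data_mtx);	other mutexes only after Giant (rule 1)
 *	mtx_unlock(&data_mtx);
 *	mtx_unlock(&Giant);
 *	sx_xunlock(&data_sx);
 */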

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.1/sys/kern/subr_witness.c 157869 2006-04-19 16:00:05Z jhb $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_CHILDCOUNT		((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN	6

struct witness_child_list_entry;

struct witness {
	const char	*w_name;
	struct lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char	*w_file;
	int		w_line;
	u_int		w_level;
	u_int		w_refcount;
	u_char		w_Giant_squawked:1;
	u_char		w_other_squawked:1;
	u_char		w_same_squawked:1;
	u_char		w_displayed:1;
};

struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness	*wcl_children[WITNESS_NCHILDREN];
	u_int		wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_order_list_entry {
	const char	*w_name;
	struct lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct witness *enroll(const char *description,
		    struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
		    struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  However, witness may not be
 * turned back on once it has been turned off.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
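
/*
 * Usage sketch (assuming the standard tunable/sysctl machinery declared
 * above): witness can be disabled at boot via loader.conf with
 * debug.witness.watch="0", or at runtime with
 *
 *	# sysctl debug.witness.watch=0
 *
 * Attempting to set it back to a non-zero value returns EINVAL; see
 * sysctl_debug_witness_watch() below.
 */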

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

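/*
 * Each NULL-terminated run of entries below declares a required lock
 * order: witness_initialize() enrolls each name and makes it a child of
 * the entry before it in the run, so earlier locks in a run must always
 * be acquired before later ones.
 */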
static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks.
	 */
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "sabtty", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "taskqueue_fast", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks that have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.
 */
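/*
 * Example entry (hypothetical lock names):
 *
 *	{ "foo_mtx", "bar_mtx" },
 */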
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * Global variables for bookkeeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = suser(req->td);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}

void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#ifdef DDB
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization, technically we could get
		 * away just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames (e.g. leading "../" components). */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
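	/*
	 * On allocation failure itismychild() has already dropped w_mtx
	 * (via witness_child_get()), so only unlock on success here.
	 */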
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
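
/*
 * Caller sketch (hypothetical): a subsystem with two private sleep
 * mutexes can pin their relative order explicitly at attach time:
 *
 *	if (witness_defineorder(&sc->sc_a.mtx_object,
 *	    &sc->sc_b.mtx_object) != 0)
 *		printf("a-before-b conflicts with an existing order\n");
 */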

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.mtx_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.mtx_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
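/*
 * Typical call (sketch; callers normally go through the WITNESS_WARN()
 * macro from <sys/lock.h>, which compiles away in non-WITNESS kernels):
 *
 *	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "Calling uiomove()");
 */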
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spinlock preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * 3rd-party device drivers use spin locks because that is all they
	 * have available on Windows and Linux and they think that normal
	 * mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we remove the outgoing witness
	 * from its list of children.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		w_child_cnt--;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Append "child" to "parent"'s child list, allocating a new
	 * list entry if all the existing ones are full.
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
		w_child_cnt++;
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	return (insertchild(parent, child));
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	w_child_cnt--;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	w_child_free_cnt--;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
	w_child_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}
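
/*
 * Sample witness_list_lock() output line (illustrative only; the class
 * name comes from the lock class, and the address and file/line here are
 * made up):
 *
 *	exclusive sleep mutex so_snd (so_snd) r = 0 (0xc2a10b70) locked @
 *	    kern/uipc_socket.c:42
 */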

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    lock->lo_class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}
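
/*
 * Hypothetical caller sketch: assert that curthread holds 'lock'
 * exclusively and has not recursed on it.
 *
 *	witness_assert(lock, LA_XLOCKED | LA_NOTRECURSED, __FILE__,
 *	    __LINE__);
 */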

#ifdef DDB
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;
	pid_t pid;
	struct proc *p;

	if (have_addr) {
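		/*
		 * ddb parses the argument as hex, but pids are decimal:
		 * re-read the hex nibbles as decimal digits, so that a
		 * typed "1234" (parsed as 0x1234) becomes pid 1234.
		 */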
		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
		    ((addr >> 16) % 16) * 10000;
		/* sx_slock(&allproc_lock); */
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_pid == pid)
				break;
		}
		/* sx_sunlock(&allproc_lock); */
		if (p == NULL) {
			db_printf("pid %d not found\n", pid);
			return;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			witness_list(td);
		}
	} else {
		td = curthread;
		witness_list(td);
	}
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif