1 /*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 */
31
32 /*
33 * Implementation of the `witness' lock verifier. Originally implemented for
34 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
35 * classes in FreeBSD.
36 */
37
38 /*
39 * Main Entry: witness
40 * Pronunciation: 'wit-n&s
41 * Function: noun
42 * Etymology: Middle English witnesse, from Old English witnes knowledge,
43 * testimony, witness, from 2wit
44 * Date: before 12th century
45 * 1 : attestation of a fact or event : TESTIMONY
46 * 2 : one that gives evidence; specifically : one who testifies in
47 * a cause or before a judicial tribunal
48 * 3 : one asked to be present at a transaction so as to be able to
49 * testify to its having taken place
50 * 4 : one who has personal knowledge of something
51 * 5 a : something serving as evidence or proof : SIGN
52 * b : public affirmation by word or example of usually
53 * religious faith or conviction <the heroic witness to divine
54 * life -- Pilot>
55 * 6 capitalized : a member of the Jehovah's Witnesses
56 */
57
58 /*
59 * Special rules concerning Giant and lock orders:
60 *
61 * 1) Giant must be acquired before any other mutexes. Stated another way,
62 * no other mutex may be held when Giant is acquired.
63 *
64 * 2) Giant must be released when blocking on a sleepable lock.
65 *
66 * This rule is less obvious, but is a result of Giant providing the same
67 * semantics as spl(). Basically, when a thread sleeps, it must release
68 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
69 * 2).
70 *
71 * 3) Giant may be acquired before or after sleepable locks.
72 *
73 * This rule is also not quite as obvious. Giant may be acquired after
74 * a sleepable lock because it is a non-sleepable lock and non-sleepable
75 * locks may always be acquired while holding a sleepable lock. The second
76 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
77 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
78 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
79 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
80 * execute. Thus, acquiring Giant both before and after a sleepable lock
81 * will not result in a lock order reversal.
82 */
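
/*
 * As a sketch of rule 3), assume a hypothetical sleepable sx lock "xlock".
 * Both of these orders are then acceptable:
 *
 *	sx_xlock(&xlock);		mtx_lock(&Giant);
 *	mtx_lock(&Giant);		sx_xlock(&xlock);
 *
 * The right-hand order is safe because blocking on xlock drops Giant
 * (rule 2), so a thread using the left-hand order can always make
 * progress.
 */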
83
84 #include <sys/cdefs.h>
85 __FBSDID("$FreeBSD$");
86
87 #include "opt_ddb.h"
88 #include "opt_witness.h"
89
90 #include <sys/param.h>
91 #include <sys/bus.h>
92 #include <sys/kdb.h>
93 #include <sys/kernel.h>
94 #include <sys/ktr.h>
95 #include <sys/lock.h>
96 #include <sys/malloc.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/sysctl.h>
100 #include <sys/systm.h>
101
102 #include <ddb/ddb.h>
103
104 #include <machine/stdarg.h>
105
106 /* Define this to check for blessed mutexes */
107 #undef BLESSING
108
109 #define WITNESS_COUNT 1024
110 #define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
111 /*
112 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
113 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure gracefully, and
114 * we are probably safe for the most part, but it's still a SWAG.
115 */
116 #define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)
117
118 #define WITNESS_NCHILDREN 6
119
120 struct witness_child_list_entry;
121
122 struct witness {
123 const char *w_name;
124 struct lock_class *w_class;
125 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
126 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
127 struct witness_child_list_entry *w_children; /* Great evilness... */
128 const char *w_file;
129 int w_line;
130 u_int w_level;
131 u_int w_refcount;
132 u_char w_Giant_squawked:1;
133 u_char w_other_squawked:1;
134 u_char w_same_squawked:1;
135 u_char w_displayed:1;
136 };
137
138 struct witness_child_list_entry {
139 struct witness_child_list_entry *wcl_next;
140 struct witness *wcl_children[WITNESS_NCHILDREN];
141 u_int wcl_count;
142 };
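
/*
 * Children are thus kept in a chain of fixed-size arrays rather than one
 * list node per child.  For example, a witness with 10 children uses two
 * entries: the first full with WITNESS_NCHILDREN (6) children, the second
 * holding the remaining 4.
 */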
143
144 STAILQ_HEAD(witness_list, witness);
145
146 #ifdef BLESSING
147 struct witness_blessed {
148 const char *b_lock1;
149 const char *b_lock2;
150 };
151 #endif
152
153 struct witness_order_list_entry {
154 const char *w_name;
155 struct lock_class *w_class;
156 };
157
158 #ifdef BLESSING
159 static int blessed(struct witness *, struct witness *);
160 #endif
161 static int depart(struct witness *w);
162 static struct witness *enroll(const char *description,
163 struct lock_class *lock_class);
164 static int insertchild(struct witness *parent, struct witness *child);
165 static int isitmychild(struct witness *parent, struct witness *child);
166 static int isitmydescendant(struct witness *parent, struct witness *child);
167 static int itismychild(struct witness *parent, struct witness *child);
168 static void removechild(struct witness *parent, struct witness *child);
169 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
170 static const char *fixup_filename(const char *file);
171 static struct witness *witness_get(void);
172 static void witness_free(struct witness *m);
173 static struct witness_child_list_entry *witness_child_get(void);
174 static void witness_child_free(struct witness_child_list_entry *wcl);
175 static struct lock_list_entry *witness_lock_list_get(void);
176 static void witness_lock_list_free(struct lock_list_entry *lle);
177 static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
178 struct lock_object *lock);
179 static void witness_list_lock(struct lock_instance *instance);
180 #ifdef DDB
181 static void witness_leveldescendents(struct witness *parent, int level);
182 static void witness_levelall(void);
183 static void witness_displaydescendants(void(*)(const char *fmt, ...),
184 struct witness *, int indent);
185 static void witness_display_list(void(*prnt)(const char *fmt, ...),
186 struct witness_list *list);
187 static void witness_display(void(*)(const char *fmt, ...));
188 static void witness_list(struct thread *td);
189 #endif
190
191 SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");
192
193 /*
194 * If set to 0, witness is disabled. If set to a non-zero value, witness
195 * performs full lock order checking for all locks. At runtime, this
196 * value may be set to 0 to turn off witness.  Witness may not be
197 * turned back on once it has been turned off, however.
198 */
199 static int witness_watch = 1;
200 TUNABLE_INT("debug.witness.watch", &witness_watch);
201 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
202 sysctl_debug_witness_watch, "I", "witness is watching lock operations");
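
/*
 * For example, witness can be turned off (but, per the handler below,
 * never back on) at runtime with:
 *
 *	sysctl debug.witness.watch=0
 */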
203
204 #ifdef KDB
205 /*
206 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
207 * to drop into the kernel debugger when:
208 * - a lock hierarchy violation occurs
209 * - locks are held when going to sleep.
210 */
211 #ifdef WITNESS_KDB
212 int witness_kdb = 1;
213 #else
214 int witness_kdb = 0;
215 #endif
216 TUNABLE_INT("debug.witness.kdb", &witness_kdb);
217 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
218
219 /*
220 * When KDB is enabled and witness_trace is set to 1, it will cause the system
221 * to print a stack trace when:
222 * - a lock hierarchy violation occurs
223 * - locks are held when going to sleep.
224 */
225 int witness_trace = 1;
226 TUNABLE_INT("debug.witness.trace", &witness_trace);
227 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
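
/*
 * Both of the knobs above are also tunables and so may be set at boot
 * time, e.g. in /boot/loader.conf:
 *
 *	debug.witness.kdb=1
 *	debug.witness.trace=0
 */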
228 #endif /* KDB */
229
230 #ifdef WITNESS_SKIPSPIN
231 int witness_skipspin = 1;
232 #else
233 int witness_skipspin = 0;
234 #endif
235 TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
236 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
237 &witness_skipspin, 0, "");
238
239 static struct mtx w_mtx;
240 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
241 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
242 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
243 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
244 static struct witness_child_list_entry *w_child_free = NULL;
245 static struct lock_list_entry *w_lock_list_free = NULL;
246
247 static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
248 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
249 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
250 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
251 "");
252 SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
253 &w_child_free_cnt, 0, "");
254 SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
255 "");
256
257 static struct witness w_data[WITNESS_COUNT];
258 static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
259 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
260
261 static struct witness_order_list_entry order_lists[] = {
262 /*
263 * sx locks
264 */
265 { "proctree", &lock_class_sx },
266 { "allproc", &lock_class_sx },
267 { NULL, NULL },
268 /*
269 * Various mutexes
270 */
271 { "Giant", &lock_class_mtx_sleep },
272 { "filedesc structure", &lock_class_mtx_sleep },
273 { "pipe mutex", &lock_class_mtx_sleep },
274 { "sigio lock", &lock_class_mtx_sleep },
275 { "process group", &lock_class_mtx_sleep },
276 { "process lock", &lock_class_mtx_sleep },
277 { "session", &lock_class_mtx_sleep },
278 { "uidinfo hash", &lock_class_mtx_sleep },
279 { "uidinfo struct", &lock_class_mtx_sleep },
280 { "allprison", &lock_class_mtx_sleep },
281 { NULL, NULL },
282 /*
283 * Sockets
284 */
285 { "filedesc structure", &lock_class_mtx_sleep },
286 { "accept", &lock_class_mtx_sleep },
287 { "so_snd", &lock_class_mtx_sleep },
288 { "so_rcv", &lock_class_mtx_sleep },
289 { "sellck", &lock_class_mtx_sleep },
290 { NULL, NULL },
291 /*
292 * Routing
293 */
294 { "so_rcv", &lock_class_mtx_sleep },
295 { "radix node head", &lock_class_mtx_sleep },
296 { "rtentry", &lock_class_mtx_sleep },
297 { "ifaddr", &lock_class_mtx_sleep },
298 { NULL, NULL },
299 /*
300 * Multicast - protocol locks before interface locks.
301 */
302 { "in_multi_mtx", &lock_class_mtx_sleep },
303 { "igmp_mtx", &lock_class_mtx_sleep },
304 { "if_addr_mtx", &lock_class_mtx_sleep },
305 { NULL, NULL },
306 /*
307 * UNIX Domain Sockets
308 */
309 { "unp", &lock_class_mtx_sleep },
310 { "so_snd", &lock_class_mtx_sleep },
311 { NULL, NULL },
312 /*
313 * UDP/IP
314 */
315 { "udp", &lock_class_mtx_sleep },
316 { "udpinp", &lock_class_mtx_sleep },
317 { "so_snd", &lock_class_mtx_sleep },
318 { NULL, NULL },
319 /*
320 * TCP/IP
321 */
322 { "tcp", &lock_class_mtx_sleep },
323 { "tcpinp", &lock_class_mtx_sleep },
324 { "so_snd", &lock_class_mtx_sleep },
325 { NULL, NULL },
326 /*
327 * SLIP
328 */
329 { "slip_mtx", &lock_class_mtx_sleep },
330 { "slip sc_mtx", &lock_class_mtx_sleep },
331 { NULL, NULL },
332 /*
333 * netatalk
334 */
335 { "ddp_list_mtx", &lock_class_mtx_sleep },
336 { "ddp_mtx", &lock_class_mtx_sleep },
337 { NULL, NULL },
338 /*
339 * BPF
340 */
341 { "bpf global lock", &lock_class_mtx_sleep },
342 { "bpf interface lock", &lock_class_mtx_sleep },
343 { "bpf cdev lock", &lock_class_mtx_sleep },
344 { NULL, NULL },
345 /*
346 * NFS server
347 */
348 { "nfsd_mtx", &lock_class_mtx_sleep },
349 { "so_snd", &lock_class_mtx_sleep },
350 { NULL, NULL },
351 /*
352 * Netgraph
353 */
354 { "ng_node", &lock_class_mtx_sleep },
355 { "ng_worklist", &lock_class_mtx_sleep },
356 { NULL, NULL },
357 /*
358 * CDEV
359 */
360 { "system map", &lock_class_mtx_sleep },
361 { "vm page queue mutex", &lock_class_mtx_sleep },
362 { "vnode interlock", &lock_class_mtx_sleep },
363 { "cdev", &lock_class_mtx_sleep },
364 { NULL, NULL },
365 /*
366 * spin locks
367 */
368 #ifdef SMP
369 { "ap boot", &lock_class_mtx_spin },
370 #endif
371 { "rm.mutex_mtx", &lock_class_mtx_spin },
372 { "sio", &lock_class_mtx_spin },
373 #ifdef __i386__
374 { "cy", &lock_class_mtx_spin },
375 #endif
376 { "uart_hwmtx", &lock_class_mtx_spin },
377 { "sabtty", &lock_class_mtx_spin },
378 { "zstty", &lock_class_mtx_spin },
379 { "fast_taskqueue", &lock_class_mtx_spin },
380 { "intr table", &lock_class_mtx_spin },
381 { "sleepq chain", &lock_class_mtx_spin },
382 { "sched lock", &lock_class_mtx_spin },
383 { "turnstile chain", &lock_class_mtx_spin },
384 { "td_contested", &lock_class_mtx_spin },
385 { "callout", &lock_class_mtx_spin },
386 { "entropy harvest mutex", &lock_class_mtx_spin },
387 /*
388 * leaf locks
389 */
390 { "allpmaps", &lock_class_mtx_spin },
391 { "vm page queue free mutex", &lock_class_mtx_spin },
392 { "icu", &lock_class_mtx_spin },
393 #ifdef SMP
394 { "smp rendezvous", &lock_class_mtx_spin },
395 #if defined(__i386__) || defined(__amd64__)
396 { "tlb", &lock_class_mtx_spin },
397 #endif
398 #ifdef __sparc64__
399 { "ipi", &lock_class_mtx_spin },
400 { "rtc_mtx", &lock_class_mtx_spin },
401 #endif
402 #endif
403 { "clk", &lock_class_mtx_spin },
404 { "mutex profiling lock", &lock_class_mtx_spin },
405 { "kse zombie lock", &lock_class_mtx_spin },
406 { "ALD Queue", &lock_class_mtx_spin },
407 #ifdef __ia64__
408 { "MCA spin lock", &lock_class_mtx_spin },
409 #endif
410 #if defined(__i386__) || defined(__amd64__)
411 { "pcicfg", &lock_class_mtx_spin },
412 { "NDIS thread lock", &lock_class_mtx_spin },
413 #endif
414 { "tw_osl_io_lock", &lock_class_mtx_spin },
415 { "tw_osl_q_lock", &lock_class_mtx_spin },
416 { "tw_cl_io_lock", &lock_class_mtx_spin },
417 { "tw_cl_intr_lock", &lock_class_mtx_spin },
418 { "tw_cl_gen_lock", &lock_class_mtx_spin },
419 { NULL, NULL },
420 { NULL, NULL }
421 };
422
423 #ifdef BLESSING
424 /*
425 * Pairs of locks which have been blessed
426 * Don't complain about order problems with blessed locks
427 */
428 static struct witness_blessed blessed_list[] = {
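	/*
	 * Each entry pairs the two lock names to exempt; a hypothetical
	 * entry would look like:
	 *
	 *	{ "foo_mtx", "bar_mtx" },
	 */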
429 };
430 static int blessed_count =
431 sizeof(blessed_list) / sizeof(struct witness_blessed);
432 #endif
433
434 /*
435 * List of all locks in the system.
436 */
437 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
438
439 static struct mtx all_mtx = {
440 { &lock_class_mtx_sleep, /* mtx_object.lo_class */
441 "All locks list", /* mtx_object.lo_name */
442 "All locks list", /* mtx_object.lo_type */
443 LO_INITIALIZED, /* mtx_object.lo_flags */
444 { NULL, NULL }, /* mtx_object.lo_list */
445 NULL }, /* mtx_object.lo_witness */
446 MTX_UNOWNED, 0 /* mtx_lock, mtx_recurse */
447 };
448
449 /*
450 * This global is initially 1; it is set to 0 once it becomes safe to use
451 * the witness code.
451 */
452 static int witness_cold = 1;
453
454 /*
455 * This global is set to 1 once the static lock orders have been enrolled
456 * so that a warning can be issued for any spin locks enrolled later.
457 */
458 static int witness_spin_warn = 0;
459
460 /*
461 * Global variables for bookkeeping.
462 */
463 static int lock_cur_cnt;
464 static int lock_max_cnt;
465
466 /*
467 * The WITNESS-enabled diagnostic code.
468 */
469 static void
470 witness_initialize(void *dummy __unused)
471 {
472 struct lock_object *lock;
473 struct witness_order_list_entry *order;
474 struct witness *w, *w1;
475 int i;
476
477 /*
478 * We have to release Giant before initializing its witness
479 * structure so that WITNESS doesn't get confused.
480 */
481 mtx_unlock(&Giant);
482 mtx_assert(&Giant, MA_NOTOWNED);
483
484 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
485 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
486 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
487 MTX_NOWITNESS);
488 for (i = 0; i < WITNESS_COUNT; i++)
489 witness_free(&w_data[i]);
490 for (i = 0; i < WITNESS_CHILDCOUNT; i++)
491 witness_child_free(&w_childdata[i]);
492 for (i = 0; i < LOCK_CHILDCOUNT; i++)
493 witness_lock_list_free(&w_locklistdata[i]);
494
495 /* First add in all the specified order lists. */
496 for (order = order_lists; order->w_name != NULL; order++) {
497 w = enroll(order->w_name, order->w_class);
498 if (w == NULL)
499 continue;
500 w->w_file = "order list";
501 for (order++; order->w_name != NULL; order++) {
502 w1 = enroll(order->w_name, order->w_class);
503 if (w1 == NULL)
504 continue;
505 w1->w_file = "order list";
506 if (!itismychild(w, w1))
507 panic("Not enough memory for static orders!");
508 w = w1;
509 }
510 }
511 witness_spin_warn = 1;
512
513 /* Iterate through all locks and add them to witness. */
514 mtx_lock(&all_mtx);
515 TAILQ_FOREACH(lock, &all_locks, lo_list) {
516 if (lock->lo_flags & LO_WITNESS)
517 lock->lo_witness = enroll(lock->lo_type,
518 LOCK_CLASS(lock));
519 else
520 lock->lo_witness = NULL;
521 }
522 mtx_unlock(&all_mtx);
523
524 /* Mark the witness code as being ready for use. */
525 witness_cold = 0;
526
527 mtx_lock(&Giant);
528 }
529 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
530
531 static int
532 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
533 {
534 int error, value;
535
536 value = witness_watch;
537 error = sysctl_handle_int(oidp, &value, 0, req);
538 if (error != 0 || req->newptr == NULL)
539 return (error);
540 error = suser(req->td);
541 if (error != 0)
542 return (error);
543 if (value == witness_watch)
544 return (0);
545 if (value != 0)
546 return (EINVAL);
547 witness_watch = 0;
548 return (0);
549 }
550
551 void
552 witness_init(struct lock_object *lock)
553 {
554 struct lock_class *class;
555
556 class = LOCK_CLASS(lock);
557 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
558 (class->lc_flags & LC_RECURSABLE) == 0)
559 panic("%s: lock (%s) %s can not be recursable", __func__,
560 class->lc_name, lock->lo_name);
561 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
562 (class->lc_flags & LC_SLEEPABLE) == 0)
563 panic("%s: lock (%s) %s can not be sleepable", __func__,
564 class->lc_name, lock->lo_name);
565 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
566 (class->lc_flags & LC_UPGRADABLE) == 0)
567 panic("%s: lock (%s) %s can not be upgradable", __func__,
568 class->lc_name, lock->lo_name);
569
570 mtx_lock(&all_mtx);
571 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
572 lock_cur_cnt++;
573 if (lock_cur_cnt > lock_max_cnt)
574 lock_max_cnt = lock_cur_cnt;
575 mtx_unlock(&all_mtx);
576 if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
577 (lock->lo_flags & LO_WITNESS) != 0)
578 lock->lo_witness = enroll(lock->lo_type, class);
579 else
580 lock->lo_witness = NULL;
581 }
582
583 void
584 witness_destroy(struct lock_object *lock)
585 {
586 struct lock_class *class;
587 struct witness *w;
588
589 class = LOCK_CLASS(lock);
590 if (witness_cold)
591 panic("lock (%s) %s destroyed while witness_cold",
592 class->lc_name, lock->lo_name);
593
594 /* XXX: need to verify that no one holds the lock */
595 w = lock->lo_witness;
596 if (w != NULL) {
597 mtx_lock_spin(&w_mtx);
598 MPASS(w->w_refcount > 0);
599 w->w_refcount--;
600
601 /*
602 * Lock is already released if we have an allocation failure
603 * and depart() fails.
604 */
605 if (w->w_refcount != 0 || depart(w))
606 mtx_unlock_spin(&w_mtx);
607 }
608
609 mtx_lock(&all_mtx);
610 lock_cur_cnt--;
611 TAILQ_REMOVE(&all_locks, lock, lo_list);
612 mtx_unlock(&all_mtx);
613 }
614
615 #ifdef DDB
616 static void
617 witness_levelall(void)
618 {
619 struct witness_list *list;
620 struct witness *w, *w1;
621
622 /*
623 * First clear all levels.
624 */
625 STAILQ_FOREACH(w, &w_all, w_list) {
626 w->w_level = 0;
627 }
628
629 /*
630 * Look for locks with no parent and level all their descendants.
631 */
632 STAILQ_FOREACH(w, &w_all, w_list) {
633 /*
634 * This is just an optimization; technically we could get
635 * away with just walking the w_all list each time.
636 */
637 if (w->w_class->lc_flags & LC_SLEEPLOCK)
638 list = &w_sleep;
639 else
640 list = &w_spin;
641 STAILQ_FOREACH(w1, list, w_typelist) {
642 if (isitmychild(w1, w))
643 goto skip;
644 }
645 witness_leveldescendents(w, 0);
646 skip:
647 ; /* silence GCC 3.x */
648 }
649 }
650
651 static void
652 witness_leveldescendents(struct witness *parent, int level)
653 {
654 struct witness_child_list_entry *wcl;
655 int i;
656
657 if (parent->w_level < level)
658 parent->w_level = level;
659 level++;
660 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
661 for (i = 0; i < wcl->wcl_count; i++)
662 witness_leveldescendents(wcl->wcl_children[i], level);
663 }
664
665 static void
666 witness_displaydescendants(void(*prnt)(const char *fmt, ...),
667 struct witness *parent, int indent)
668 {
669 struct witness_child_list_entry *wcl;
670 int i, level;
671
672 level = parent->w_level;
673 prnt("%-2d", level);
674 for (i = 0; i < indent; i++)
675 prnt(" ");
676 if (parent->w_refcount > 0)
677 prnt("%s", parent->w_name);
678 else
679 prnt("(dead)");
680 if (parent->w_displayed) {
681 prnt(" -- (already displayed)\n");
682 return;
683 }
684 parent->w_displayed = 1;
685 if (parent->w_refcount > 0) {
686 if (parent->w_file != NULL)
687 prnt(" -- last acquired @ %s:%d", parent->w_file,
688 parent->w_line);
689 }
690 prnt("\n");
691 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
692 for (i = 0; i < wcl->wcl_count; i++)
693 witness_displaydescendants(prnt,
694 wcl->wcl_children[i], indent + 1);
695 }
696
697 static void
698 witness_display_list(void(*prnt)(const char *fmt, ...),
699 struct witness_list *list)
700 {
701 struct witness *w;
702
703 STAILQ_FOREACH(w, list, w_typelist) {
704 if (w->w_file == NULL || w->w_level > 0)
705 continue;
706 /*
707 * This lock has no ancestors; display its descendants.
708 */
709 witness_displaydescendants(prnt, w, 0);
710 }
711 }
712
713 static void
714 witness_display(void(*prnt)(const char *fmt, ...))
715 {
716 struct witness *w;
717
718 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
719 witness_levelall();
720
721 /* Clear all the displayed flags. */
722 STAILQ_FOREACH(w, &w_all, w_list) {
723 w->w_displayed = 0;
724 }
725
726 /*
727 * First, handle sleep locks which have been acquired at least
728 * once.
729 */
730 prnt("Sleep locks:\n");
731 witness_display_list(prnt, &w_sleep);
732
733 /*
734 * Now do spin locks which have been acquired at least once.
735 */
736 prnt("\nSpin locks:\n");
737 witness_display_list(prnt, &w_spin);
738
739 /*
740 * Finally, any locks which have not been acquired yet.
741 */
742 prnt("\nLocks which were never acquired:\n");
743 STAILQ_FOREACH(w, &w_all, w_list) {
744 if (w->w_file != NULL || w->w_refcount == 0)
745 continue;
746 prnt("%s\n", w->w_name);
747 }
748 }
749 #endif /* DDB */
750
751 /* Trim useless garbage from filenames. */
752 static const char *
753 fixup_filename(const char *file)
754 {
755
756 if (file == NULL)
757 return (NULL);
758 while (strncmp(file, "../", 3) == 0)
759 file += 3;
760 return (file);
761 }
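
/*
 * For example, fixup_filename("../../kern/kern_mutex.c") returns a pointer
 * to the "kern/kern_mutex.c" suffix of the same string.
 */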
762
763 int
764 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
765 {
766
767 if (witness_watch == 0 || panicstr != NULL)
768 return (0);
769
770 /* Require locks that witness knows about. */
771 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
772 lock2->lo_witness == NULL)
773 return (EINVAL);
774
775 MPASS(!mtx_owned(&w_mtx));
776 mtx_lock_spin(&w_mtx);
777
778 /*
779 * If we already have either an explicit or implied lock order that
780 * is the other way around, then return an error.
781 */
782 if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
783 mtx_unlock_spin(&w_mtx);
784 return (EDOOFUS);
785 }
786
787 /* Try to add the new order. */
788 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
789 lock2->lo_type, lock1->lo_type);
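	/*
	 * On failure, itismychild() has already dropped w_mtx (via
	 * witness_child_get()), so we only unlock it on success.
	 */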
790 if (!itismychild(lock1->lo_witness, lock2->lo_witness))
791 return (ENOMEM);
792 mtx_unlock_spin(&w_mtx);
793 return (0);
794 }
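
/*
 * For example, a driver with two hypothetical mutexes could pin their order
 * explicitly rather than relying on the order of first acquisition:
 *
 *	error = witness_defineorder(&sc->sc_cfg_mtx.mtx_object,
 *	    &sc->sc_io_mtx.mtx_object);
 */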
795
796 void
797 witness_checkorder(struct lock_object *lock, int flags, const char *file,
798 int line)
799 {
800 struct lock_list_entry **lock_list, *lle;
801 struct lock_instance *lock1, *lock2;
802 struct lock_class *class;
803 struct witness *w, *w1;
804 struct thread *td;
805 int i, j;
806
807 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
808 panicstr != NULL)
809 return;
810
811 /*
812 * Try locks do not block if they fail to acquire the lock, thus
813 * there is no danger of deadlocks or of switching while holding a
814 * spin lock if we acquire a lock via a try operation. This
815 * function shouldn't even be called for try locks, so panic if
816 * that happens.
817 */
818 if (flags & LOP_TRYLOCK)
819 panic("%s should not be called for try lock operations",
820 __func__);
821
822 w = lock->lo_witness;
823 class = LOCK_CLASS(lock);
824 td = curthread;
825 file = fixup_filename(file);
826
827 if (class->lc_flags & LC_SLEEPLOCK) {
828 /*
829 * Since spin locks include a critical section, this check
830 * implicitly enforces a lock order of all sleep locks before
831 * all spin locks.
832 */
833 if (td->td_critnest != 0 && !kdb_active)
834 panic("blockable sleep lock (%s) %s @ %s:%d",
835 class->lc_name, lock->lo_name, file, line);
836
837 /*
838 * If this is the first lock acquired then just return as
839 * no order checking is needed.
840 */
841 if (td->td_sleeplocks == NULL)
842 return;
843 lock_list = &td->td_sleeplocks;
844 } else {
845 /*
846 * If this is the first lock, just return as no order
847 * checking is needed. We check this in both if clauses
848 * here as unifying the check would require us to use a
849 * critical section to ensure we don't migrate while doing
850 * the check. Note that if this is not the first lock, we
851 * are already in a critical section and are safe for the
852 * rest of the check.
853 */
854 if (PCPU_GET(spinlocks) == NULL)
855 return;
856 lock_list = PCPU_PTR(spinlocks);
857 }
858
859 /*
860 * Check to see if we are recursing on a lock we already own. If
861 * so, make sure that we don't mismatch exclusive and shared lock
862 * acquires.
863 */
864 lock1 = find_instance(*lock_list, lock);
865 if (lock1 != NULL) {
866 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
867 (flags & LOP_EXCLUSIVE) == 0) {
868 printf("shared lock of (%s) %s @ %s:%d\n",
869 class->lc_name, lock->lo_name, file, line);
870 printf("while exclusively locked from %s:%d\n",
871 lock1->li_file, lock1->li_line);
872 panic("share->excl");
873 }
874 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
875 (flags & LOP_EXCLUSIVE) != 0) {
876 printf("exclusive lock of (%s) %s @ %s:%d\n",
877 class->lc_name, lock->lo_name, file, line);
878 printf("while share locked from %s:%d\n",
879 lock1->li_file, lock1->li_line);
880 panic("excl->share");
881 }
882 return;
883 }
884
885 /*
886 * Try locks do not block if they fail to acquire the lock, thus
887 * there is no danger of deadlocks or of switching while holding a
888 * spin lock if we acquire a lock via a try operation.
889 */
890 if (flags & LOP_TRYLOCK)
891 return;
892
893 /*
894 * Check for duplicate locks of the same type. Note that we only
895 * have to check for this on the last lock we just acquired. Any
896 * other cases will be caught as lock order violations.
897 */
898 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
899 w1 = lock1->li_lock->lo_witness;
900 if (w1 == w) {
901 if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
902 (flags & LOP_DUPOK))
903 return;
904 w->w_same_squawked = 1;
905 printf("acquiring duplicate lock of same type: \"%s\"\n",
906 lock->lo_type);
907 printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
908 lock1->li_file, lock1->li_line);
909 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
910 #ifdef KDB
911 goto debugger;
912 #else
913 return;
914 #endif
915 }
916 MPASS(!mtx_owned(&w_mtx));
917 mtx_lock_spin(&w_mtx);
918 /*
919 * If we know that the lock we are acquiring comes after
920 * the lock we most recently acquired in the lock order tree,
921 * then there is no need for any further checks.
922 */
923 if (isitmychild(w1, w)) {
924 mtx_unlock_spin(&w_mtx);
925 return;
926 }
927 for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
928 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
929
930 MPASS(j < WITNESS_COUNT);
931 lock1 = &lle->ll_children[i];
932 w1 = lock1->li_lock->lo_witness;
933
934 /*
935 * If this lock doesn't undergo witness checking,
936 * then skip it.
937 */
938 if (w1 == NULL) {
939 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
940 ("lock missing witness structure"));
941 continue;
942 }
943 /*
944 * If we are locking Giant and this is a sleepable
945 * lock, then skip it.
946 */
947 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
948 lock == &Giant.mtx_object)
949 continue;
950 /*
951 * If we are locking a sleepable lock and this lock
952 * is Giant, then skip it.
953 */
954 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
955 lock1->li_lock == &Giant.mtx_object)
956 continue;
957 /*
958 * If we are locking a sleepable lock and this lock
959 * isn't sleepable, we want to treat it as a lock
960 * order violation to enforce a general lock order of
961 * sleepable locks before non-sleepable locks.
962 */
963 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
964 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
965 goto reversal;
966 /*
967 * If we are locking Giant and this is a non-sleepable
968 * lock, then treat it as a reversal.
969 */
970 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
971 lock == &Giant.mtx_object)
972 goto reversal;
973 /*
974 * Check the lock order hierarchy for a reversal.
975 */
976 if (!isitmydescendant(w, w1))
977 continue;
978 reversal:
979 /*
980 * We have a lock order violation, check to see if it
981 * is allowed or has already been yelled about.
982 */
983 mtx_unlock_spin(&w_mtx);
984 #ifdef BLESSING
985 /*
986 * If the lock order is blessed, just bail. We don't
987 * look for other lock order violations though, which
988 * may be a bug.
989 */
990 if (blessed(w, w1))
991 return;
992 #endif
993 if (lock1->li_lock == &Giant.mtx_object) {
994 if (w1->w_Giant_squawked)
995 return;
996 else
997 w1->w_Giant_squawked = 1;
998 } else {
999 if (w1->w_other_squawked)
1000 return;
1001 else
1002 w1->w_other_squawked = 1;
1003 }
1004 /*
1005 * Ok, yell about it.
1006 */
1007 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1008 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1009 printf(
1010 "lock order reversal: (sleepable after non-sleepable)\n");
1011 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1012 && lock == &Giant.mtx_object)
1013 printf(
1014 "lock order reversal: (Giant after non-sleepable)\n");
1015 else
1016 printf("lock order reversal:\n");
1017 /*
1018 * Try to locate an earlier lock with
1019 * witness w in our list.
1020 */
1021 do {
1022 lock2 = &lle->ll_children[i];
1023 MPASS(lock2->li_lock != NULL);
1024 if (lock2->li_lock->lo_witness == w)
1025 break;
1026 if (i == 0 && lle->ll_next != NULL) {
1027 lle = lle->ll_next;
1028 i = lle->ll_count - 1;
1029 MPASS(i >= 0 && i < LOCK_NCHILDREN);
1030 } else
1031 i--;
1032 } while (i >= 0);
1033 if (i < 0) {
1034 printf(" 1st %p %s (%s) @ %s:%d\n",
1035 lock1->li_lock, lock1->li_lock->lo_name,
1036 lock1->li_lock->lo_type, lock1->li_file,
1037 lock1->li_line);
1038 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1039 lock->lo_name, lock->lo_type, file, line);
1040 } else {
1041 printf(" 1st %p %s (%s) @ %s:%d\n",
1042 lock2->li_lock, lock2->li_lock->lo_name,
1043 lock2->li_lock->lo_type, lock2->li_file,
1044 lock2->li_line);
1045 printf(" 2nd %p %s (%s) @ %s:%d\n",
1046 lock1->li_lock, lock1->li_lock->lo_name,
1047 lock1->li_lock->lo_type, lock1->li_file,
1048 lock1->li_line);
1049 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1050 lock->lo_name, lock->lo_type, file, line);
1051 }
1052 #ifdef KDB
1053 goto debugger;
1054 #else
1055 return;
1056 #endif
1057 }
1058 }
1059 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
1060 /*
1061 * If requested, build a new lock order. However, don't build a new
1062 * relationship between a sleepable lock and Giant if it is in the
1063 * wrong direction. The correct lock order is that sleepable locks
1064 * always come before Giant.
1065 */
1066 if (flags & LOP_NEWORDER &&
1067 !(lock1->li_lock == &Giant.mtx_object &&
1068 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1069 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1070 lock->lo_type, lock1->li_lock->lo_type);
1071 if (!itismychild(lock1->li_lock->lo_witness, w))
1072 /* Witness is dead. */
1073 return;
1074 }
1075 mtx_unlock_spin(&w_mtx);
1076 return;
1077
1078 #ifdef KDB
1079 debugger:
1080 if (witness_trace)
1081 kdb_backtrace();
1082 if (witness_kdb)
1083 kdb_enter(__func__);
1084 #endif
1085 }
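
/*
 * A reversal reported by the function above looks like, for example
 * (hypothetical locks, addresses, and call sites):
 *
 *	lock order reversal:
 *	 1st 0xc45f9c00 bar (bar) @ dev/foo/foo.c:123
 *	 2nd 0xc41a7f28 foo (foo) @ dev/foo/foo.c:456
 */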
1086
1087 void
1088 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1089 {
1090 struct lock_list_entry **lock_list, *lle;
1091 struct lock_instance *instance;
1092 struct witness *w;
1093 struct thread *td;
1094
1095 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1096 panicstr != NULL)
1097 return;
1098 w = lock->lo_witness;
1099 td = curthread;
1100 file = fixup_filename(file);
1101
1102 /* Determine lock list for this lock. */
1103 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1104 lock_list = &td->td_sleeplocks;
1105 else
1106 lock_list = PCPU_PTR(spinlocks);
1107
1108 /* Check to see if we are recursing on a lock we already own. */
1109 instance = find_instance(*lock_list, lock);
1110 if (instance != NULL) {
1111 instance->li_flags++;
1112 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1113 td->td_proc->p_pid, lock->lo_name,
1114 instance->li_flags & LI_RECURSEMASK);
1115 instance->li_file = file;
1116 instance->li_line = line;
1117 return;
1118 }
1119
1120 /* Update per-witness last file and line acquire. */
1121 w->w_file = file;
1122 w->w_line = line;
1123
1124 /* Find the next open lock instance in the list and fill it. */
1125 lle = *lock_list;
1126 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1127 lle = witness_lock_list_get();
1128 if (lle == NULL)
1129 return;
1130 lle->ll_next = *lock_list;
1131 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1132 td->td_proc->p_pid, lle);
1133 *lock_list = lle;
1134 }
1135 instance = &lle->ll_children[lle->ll_count++];
1136 instance->li_lock = lock;
1137 instance->li_line = line;
1138 instance->li_file = file;
1139 if ((flags & LOP_EXCLUSIVE) != 0)
1140 instance->li_flags = LI_EXCLUSIVE;
1141 else
1142 instance->li_flags = 0;
1143 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1144 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1145 }
1146
1147 void
1148 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1149 {
1150 struct lock_instance *instance;
1151 struct lock_class *class;
1152
1153 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1154 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1155 return;
1156 class = LOCK_CLASS(lock);
1157 file = fixup_filename(file);
1158 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1159 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1160 class->lc_name, lock->lo_name, file, line);
1161 if ((flags & LOP_TRYLOCK) == 0)
1162 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
1163 lock->lo_name, file, line);
1164 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1165 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1166 class->lc_name, lock->lo_name, file, line);
1167 instance = find_instance(curthread->td_sleeplocks, lock);
1168 if (instance == NULL)
1169 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1170 class->lc_name, lock->lo_name, file, line);
1171 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1172 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1173 class->lc_name, lock->lo_name, file, line);
1174 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1175 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1176 class->lc_name, lock->lo_name,
1177 instance->li_flags & LI_RECURSEMASK, file, line);
1178 instance->li_flags |= LI_EXCLUSIVE;
1179 }
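
/*
 * For example, an sx(9)-style try-upgrade path would record a successful
 * shared-to-exclusive upgrade with (sketch):
 *
 *	witness_upgrade(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
 *	    file, line);
 */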
1180
1181 void
1182 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1183 int line)
1184 {
1185 struct lock_instance *instance;
1186 struct lock_class *class;
1187
1188 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1189 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1190 return;
1191 class = LOCK_CLASS(lock);
1192 file = fixup_filename(file);
1193 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1194 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1195 class->lc_name, lock->lo_name, file, line);
1196 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1197 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1198 class->lc_name, lock->lo_name, file, line);
1199 instance = find_instance(curthread->td_sleeplocks, lock);
1200 if (instance == NULL)
1201 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1202 class->lc_name, lock->lo_name, file, line);
1203 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1204 panic("downgrade of shared lock (%s) %s @ %s:%d",
1205 class->lc_name, lock->lo_name, file, line);
1206 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1207 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1208 class->lc_name, lock->lo_name,
1209 instance->li_flags & LI_RECURSEMASK, file, line);
1210 instance->li_flags &= ~LI_EXCLUSIVE;
1211 }
1212
1213 void
1214 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1215 {
1216 struct lock_list_entry **lock_list, *lle;
1217 struct lock_instance *instance;
1218 struct lock_class *class;
1219 struct thread *td;
1220 register_t s;
1221 int i, j;
1222
1223 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1224 panicstr != NULL)
1225 return;
1226 td = curthread;
1227 class = LOCK_CLASS(lock);
1228 file = fixup_filename(file);
1229
1230 /* Find lock instance associated with this lock. */
1231 if (class->lc_flags & LC_SLEEPLOCK)
1232 lock_list = &td->td_sleeplocks;
1233 else
1234 lock_list = PCPU_PTR(spinlocks);
1235 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1236 for (i = 0; i < (*lock_list)->ll_count; i++) {
1237 instance = &(*lock_list)->ll_children[i];
1238 if (instance->li_lock == lock)
1239 goto found;
1240 }
1241 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1242 file, line);
1243 found:
1244
1245 /* First, check for shared/exclusive mismatches. */
1246 if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
1247 (flags & LOP_EXCLUSIVE) == 0) {
1248 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1249 lock->lo_name, file, line);
1250 printf("while exclusively locked from %s:%d\n",
1251 instance->li_file, instance->li_line);
1252 panic("excl->ushare");
1253 }
1254 if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
1255 (flags & LOP_EXCLUSIVE) != 0) {
1256 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1257 lock->lo_name, file, line);
1258 printf("while share locked from %s:%d\n", instance->li_file,
1259 instance->li_line);
1260 panic("share->uexcl");
1261 }
1262
1263 /* If we are recursed, unrecurse. */
1264 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1265 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1266 td->td_proc->p_pid, instance->li_lock->lo_name,
1267 instance->li_flags);
1268 instance->li_flags--;
1269 return;
1270 }
1271
1272 /* Otherwise, remove this item from the list. */
1273 s = intr_disable();
1274 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1275 td->td_proc->p_pid, instance->li_lock->lo_name,
1276 (*lock_list)->ll_count - 1);
1277 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1278 (*lock_list)->ll_children[j] =
1279 (*lock_list)->ll_children[j + 1];
1280 (*lock_list)->ll_count--;
1281 intr_restore(s);
1282
1283 /* If this lock list entry is now empty, free it. */
1284 if ((*lock_list)->ll_count == 0) {
1285 lle = *lock_list;
1286 *lock_list = lle->ll_next;
1287 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1288 td->td_proc->p_pid, lle);
1289 witness_lock_list_free(lle);
1290 }
1291 }
1292
1293 /*
1294 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1295 * exempt Giant and sleepable locks from the checks as well. If any
1296 * non-exempt locks are held, then a supplied message is printed to the
1297 * console along with a list of the offending locks. If indicated in the
1298 * flags, a failure results in a panic as well.
1299 */
1300 int
1301 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1302 {
1303 struct lock_list_entry *lle;
1304 struct lock_instance *lock1;
1305 struct thread *td;
1306 va_list ap;
1307 int i, n;
1308
1309 if (witness_cold || witness_watch == 0 || panicstr != NULL)
1310 return (0);
1311 n = 0;
1312 td = curthread;
1313 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1314 for (i = lle->ll_count - 1; i >= 0; i--) {
1315 lock1 = &lle->ll_children[i];
1316 if (lock1->li_lock == lock)
1317 continue;
1318 if (flags & WARN_GIANTOK &&
1319 lock1->li_lock == &Giant.mtx_object)
1320 continue;
1321 if (flags & WARN_SLEEPOK &&
1322 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1323 continue;
1324 if (n == 0) {
1325 va_start(ap, fmt);
1326 vprintf(fmt, ap);
1327 va_end(ap);
1328 printf(" with the following");
1329 if (flags & WARN_SLEEPOK)
1330 printf(" non-sleepable");
1331 printf(" locks held:\n");
1332 }
1333 n++;
1334 witness_list_lock(lock1);
1335 }
1336 if (PCPU_GET(spinlocks) != NULL) {
1337 /*
1338 * Since we already hold a spin lock, preemption is
1339 * already blocked.
1340 */
1341 if (n == 0) {
1342 va_start(ap, fmt);
1343 vprintf(fmt, ap);
1344 va_end(ap);
1345 printf(" with the following");
1346 if (flags & WARN_SLEEPOK)
1347 printf(" non-sleepable");
1348 printf(" locks held:\n");
1349 }
1350 n += witness_list_locks(PCPU_PTR(spinlocks));
1351 }
1352 if (flags & WARN_PANIC && n)
1353 panic("witness_warn");
1354 #ifdef KDB
1355 else if (witness_kdb && n)
1356 kdb_enter(__func__);
1357 else if (witness_trace && n)
1358 kdb_backtrace();
1359 #endif
1360 return (n);
1361 }
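
/*
 * For example, a sleep routine can verify that only sleepable locks and
 * Giant are still held (hypothetical call site):
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 */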
1362
1363 const char *
1364 witness_file(struct lock_object *lock)
1365 {
1366 struct witness *w;
1367
1368 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1369 return ("?");
1370 w = lock->lo_witness;
1371 return (w->w_file);
1372 }
1373
1374 int
1375 witness_line(struct lock_object *lock)
1376 {
1377 struct witness *w;
1378
1379 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1380 return (0);
1381 w = lock->lo_witness;
1382 return (w->w_line);
1383 }
1384
1385 static struct witness *
1386 enroll(const char *description, struct lock_class *lock_class)
1387 {
1388 struct witness *w;
1389
1390 if (witness_watch == 0 || panicstr != NULL)
1391 return (NULL);
1392 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1393 return (NULL);
1394 mtx_lock_spin(&w_mtx);
1395 STAILQ_FOREACH(w, &w_all, w_list) {
1396 if (w->w_name == description || (w->w_refcount > 0 &&
1397 strcmp(description, w->w_name) == 0)) {
1398 w->w_refcount++;
1399 mtx_unlock_spin(&w_mtx);
1400 if (lock_class != w->w_class)
1401 panic(
1402 "lock (%s) %s does not match earlier (%s) lock",
1403 description, lock_class->lc_name,
1404 w->w_class->lc_name);
1405 return (w);
1406 }
1407 }
1408 if ((w = witness_get()) == NULL)
1409 goto out;
1410 w->w_name = description;
1411 w->w_class = lock_class;
1412 w->w_refcount = 1;
1413 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1414 if (lock_class->lc_flags & LC_SPINLOCK) {
1415 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1416 w_spin_cnt++;
1417 } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1418 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1419 w_sleep_cnt++;
1420 } else {
1421 mtx_unlock_spin(&w_mtx);
1422 panic("lock class %s is not sleep or spin",
1423 lock_class->lc_name);
1424 }
1425 mtx_unlock_spin(&w_mtx);
1426 out:
1427 /*
1428 * We issue a warning for any spin locks not defined in the static
1429 * order list as a way to discourage their use (folks should really
1430 * be using non-spin mutexes most of the time). However, several
1431 * 3rd-party device drivers use spin locks because that is all they
1432 * have available on Windows and Linux, and they think that normal
1433 * mutexes are insufficient.
1434 */
1435 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
1436 printf("WITNESS: spin lock %s not in order list\n",
1437 description);
1438 return (w);
1439 }
1440
1441 /* Don't let the door bang you on the way out... */
1442 static int
1443 depart(struct witness *w)
1444 {
1445 struct witness_child_list_entry *wcl, *nwcl;
1446 struct witness_list *list;
1447 struct witness *parent;
1448
1449 MPASS(w->w_refcount == 0);
1450 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1451 list = &w_sleep;
1452 w_sleep_cnt--;
1453 } else {
1454 list = &w_spin;
1455 w_spin_cnt--;
1456 }
1457 /*
1458 * First, we run through the entire tree looking for any
1459 * witnesses that the outgoing witness is a child of. For
1460 * each parent that we find, we remove the outgoing witness
1461 * from that parent's list of children.
1462 */
1463 STAILQ_FOREACH(parent, list, w_typelist) {
1464 if (!isitmychild(parent, w))
1465 continue;
1466 removechild(parent, w);
1467 }
1468
1469 /*
1470 * Now we go through and free up the child list of the
1471 * outgoing witness.
1472 */
1473 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1474 nwcl = wcl->wcl_next;
1475 w_child_cnt--;
1476 witness_child_free(wcl);
1477 }
1478
1479 /*
1480 * Detach from various lists and free.
1481 */
1482 STAILQ_REMOVE(list, w, witness, w_typelist);
1483 STAILQ_REMOVE(&w_all, w, witness, w_list);
1484 witness_free(w);
1485
1486 return (1);
1487 }
1488
1489 /*
1490 * Add "child" as a direct child of "parent". Returns false if
1491 * we fail due to lack of memory.
1492 */
1493 static int
1494 insertchild(struct witness *parent, struct witness *child)
1495 {
1496 struct witness_child_list_entry **wcl;
1497
1498 MPASS(child != NULL && parent != NULL);
1499
1500 /*
1501 * Insert "child" after "parent"
1502 */
1503 wcl = &parent->w_children;
1504 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1505 wcl = &(*wcl)->wcl_next;
1506 if (*wcl == NULL) {
1507 *wcl = witness_child_get();
1508 if (*wcl == NULL)
1509 return (0);
1510 w_child_cnt++;
1511 }
1512 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1513
1514 return (1);
1515 }
1516
1517
1518 static int
1519 itismychild(struct witness *parent, struct witness *child)
1520 {
1523 MPASS(child != NULL && parent != NULL);
1524 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1525 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1526 panic(
1527 "%s: parent (%s) and child (%s) are not the same lock type",
1528 __func__, parent->w_class->lc_name,
1529 child->w_class->lc_name);
1530
1531 if (!insertchild(parent, child))
1532 return (0);
1533
1538 return (1);
1539 }
1540
1541 static void
1542 removechild(struct witness *parent, struct witness *child)
1543 {
1544 struct witness_child_list_entry **wcl, *wcl1;
1545 int i;
1546
1547 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1548 for (i = 0; i < (*wcl)->wcl_count; i++)
1549 if ((*wcl)->wcl_children[i] == child)
1550 goto found;
1551 return;
1552 found:
1553 (*wcl)->wcl_count--;
1554 if ((*wcl)->wcl_count > i)
1555 (*wcl)->wcl_children[i] =
1556 (*wcl)->wcl_children[(*wcl)->wcl_count];
1557 MPASS((*wcl)->wcl_children[i] != NULL);
1558 if ((*wcl)->wcl_count != 0)
1559 return;
1560 wcl1 = *wcl;
1561 *wcl = wcl1->wcl_next;
1562 w_child_cnt--;
1563 witness_child_free(wcl1);
1564 }
1565
1566 static int
1567 isitmychild(struct witness *parent, struct witness *child)
1568 {
1569 struct witness_child_list_entry *wcl;
1570 int i;
1571
1572 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1573 for (i = 0; i < wcl->wcl_count; i++) {
1574 if (wcl->wcl_children[i] == child)
1575 return (1);
1576 }
1577 }
1578 return (0);
1579 }
1580
1581 static int
1582 isitmydescendant(struct witness *parent, struct witness *child)
1583 {
1584 struct witness_child_list_entry *wcl;
1585 int i, j;
1586
1587 if (isitmychild(parent, child))
1588 return (1);
1589 j = 0;
1590 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1591 MPASS(j < 1000);
1592 for (i = 0; i < wcl->wcl_count; i++) {
1593 if (isitmydescendant(wcl->wcl_children[i], child))
1594 return (1);
1595 }
1596 j++;
1597 }
1598 return (0);
1599 }
1600
1601 #ifdef BLESSING
1602 static int
1603 blessed(struct witness *w1, struct witness *w2)
1604 {
1605 int i;
1606 struct witness_blessed *b;
1607
1608 for (i = 0; i < blessed_count; i++) {
1609 b = &blessed_list[i];
1610 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1611 if (strcmp(w2->w_name, b->b_lock2) == 0)
1612 return (1);
1613 continue;
1614 }
1615 if (strcmp(w1->w_name, b->b_lock2) == 0)
1616 if (strcmp(w2->w_name, b->b_lock1) == 0)
1617 return (1);
1618 }
1619 return (0);
1620 }
1621 #endif
1622
1623 static struct witness *
1624 witness_get(void)
1625 {
1626 struct witness *w;
1627
1628 if (witness_watch == 0) {
1629 mtx_unlock_spin(&w_mtx);
1630 return (NULL);
1631 }
1632 if (STAILQ_EMPTY(&w_free)) {
1633 witness_watch = 0;
1634 mtx_unlock_spin(&w_mtx);
1635 printf("%s: witness exhausted\n", __func__);
1636 return (NULL);
1637 }
1638 w = STAILQ_FIRST(&w_free);
1639 STAILQ_REMOVE_HEAD(&w_free, w_list);
1640 w_free_cnt--;
1641 bzero(w, sizeof(*w));
1642 return (w);
1643 }
1644
1645 static void
1646 witness_free(struct witness *w)
1647 {
1648
1649 STAILQ_INSERT_HEAD(&w_free, w, w_list);
1650 w_free_cnt++;
1651 }
1652
1653 static struct witness_child_list_entry *
1654 witness_child_get(void)
1655 {
1656 struct witness_child_list_entry *wcl;
1657
1658 if (witness_watch == 0) {
1659 mtx_unlock_spin(&w_mtx);
1660 return (NULL);
1661 }
1662 wcl = w_child_free;
1663 if (wcl == NULL) {
1664 witness_watch = 0;
1665 mtx_unlock_spin(&w_mtx);
1666 printf("%s: witness exhausted\n", __func__);
1667 return (NULL);
1668 }
1669 w_child_free = wcl->wcl_next;
1670 w_child_free_cnt--;
1671 bzero(wcl, sizeof(*wcl));
1672 return (wcl);
1673 }
1674
1675 static void
1676 witness_child_free(struct witness_child_list_entry *wcl)
1677 {
1678
1679 wcl->wcl_next = w_child_free;
1680 w_child_free = wcl;
1681 w_child_free_cnt++;
1682 }
1683
1684 static struct lock_list_entry *
1685 witness_lock_list_get(void)
1686 {
1687 struct lock_list_entry *lle;
1688
1689 if (witness_watch == 0)
1690 return (NULL);
1691 mtx_lock_spin(&w_mtx);
1692 lle = w_lock_list_free;
1693 if (lle == NULL) {
1694 witness_watch = 0;
1695 mtx_unlock_spin(&w_mtx);
1696 printf("%s: witness exhausted\n", __func__);
1697 return (NULL);
1698 }
1699 w_lock_list_free = lle->ll_next;
1700 mtx_unlock_spin(&w_mtx);
1701 bzero(lle, sizeof(*lle));
1702 return (lle);
1703 }
1704
1705 static void
1706 witness_lock_list_free(struct lock_list_entry *lle)
1707 {
1708
1709 mtx_lock_spin(&w_mtx);
1710 lle->ll_next = w_lock_list_free;
1711 w_lock_list_free = lle;
1712 mtx_unlock_spin(&w_mtx);
1713 }
1714
1715 static struct lock_instance *
1716 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1717 {
1718 struct lock_list_entry *lle;
1719 struct lock_instance *instance;
1720 int i;
1721
1722 for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1723 for (i = lle->ll_count - 1; i >= 0; i--) {
1724 instance = &lle->ll_children[i];
1725 if (instance->li_lock == lock)
1726 return (instance);
1727 }
1728 return (NULL);
1729 }
1730
1731 static void
1732 witness_list_lock(struct lock_instance *instance)
1733 {
1734 struct lock_object *lock;
1735
1736 lock = instance->li_lock;
1737 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1738 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
1739 if (lock->lo_type != lock->lo_name)
1740 printf(" (%s)", lock->lo_type);
1741 printf(" r = %d (%p) locked @ %s:%d\n",
1742 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1743 instance->li_line);
1744 }
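
/*
 * A line printed by witness_list_lock() looks like, for example
 * (hypothetical address and call site):
 *
 *	exclusive sleep mutex foo r = 0 (0xc45f9c00) locked @ dev/foo/foo.c:123
 */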
1745
1746 #ifdef DDB
1747 static int
1748 witness_thread_has_locks(struct thread *td)
1749 {
1750
1751 return (td->td_sleeplocks != NULL);
1752 }
1753
1754 static int
1755 witness_proc_has_locks(struct proc *p)
1756 {
1757 struct thread *td;
1758
1759 FOREACH_THREAD_IN_PROC(p, td) {
1760 if (witness_thread_has_locks(td))
1761 return (1);
1762 }
1763 return (0);
1764 }
1765 #endif
1766
1767 int
1768 witness_list_locks(struct lock_list_entry **lock_list)
1769 {
1770 struct lock_list_entry *lle;
1771 int i, nheld;
1772
1773 nheld = 0;
1774 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1775 for (i = lle->ll_count - 1; i >= 0; i--) {
1776 witness_list_lock(&lle->ll_children[i]);
1777 nheld++;
1778 }
1779 return (nheld);
1780 }
1781
1782 /*
1783 * This is a bit risky at best. We call this function when we have timed
1784 * out acquiring a spin lock, and we assume that the other CPU is stuck
1785 * with this lock held. So, we go groveling around in the other CPU's
1786 * per-cpu data to try to find the lock instance for this spin lock to
1787 * see when it was last acquired.
1788 */
1789 void
1790 witness_display_spinlock(struct lock_object *lock, struct thread *owner)
1791 {
1792 struct lock_instance *instance;
1793 struct pcpu *pc;
1794
1795 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
1796 return;
1797 pc = pcpu_find(owner->td_oncpu);
1798 instance = find_instance(pc->pc_spinlocks, lock);
1799 if (instance != NULL)
1800 witness_list_lock(instance);
1801 }
1802
1803 void
1804 witness_save(struct lock_object *lock, const char **filep, int *linep)
1805 {
1806 struct lock_list_entry *lock_list;
1807 struct lock_instance *instance;
1808 struct lock_class *class;
1809
1810 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1811 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1812 return;
1813 class = LOCK_CLASS(lock);
1814 if (class->lc_flags & LC_SLEEPLOCK)
1815 lock_list = curthread->td_sleeplocks;
1816 else {
1817 if (witness_skipspin)
1818 return;
1819 lock_list = PCPU_GET(spinlocks);
1820 }
1821 instance = find_instance(lock_list, lock);
1822 if (instance == NULL)
1823 panic("%s: lock (%s) %s not locked", __func__,
1824 class->lc_name, lock->lo_name);
1825 *filep = instance->li_file;
1826 *linep = instance->li_line;
1827 }
1828
1829 void
1830 witness_restore(struct lock_object *lock, const char *file, int line)
1831 {
1832 struct lock_list_entry *lock_list;
1833 struct lock_instance *instance;
1834 struct lock_class *class;
1835
1836 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1837 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1838 return;
1839 class = LOCK_CLASS(lock);
1840 if (class->lc_flags & LC_SLEEPLOCK)
1841 lock_list = curthread->td_sleeplocks;
1842 else {
1843 if (witness_skipspin)
1844 return;
1845 lock_list = PCPU_GET(spinlocks);
1846 }
1847 instance = find_instance(lock_list, lock);
1848 if (instance == NULL)
1849 panic("%s: lock (%s) %s not locked", __func__,
1850 class->lc_name, lock->lo_name);
1851 lock->lo_witness->w_file = file;
1852 lock->lo_witness->w_line = line;
1853 instance->li_file = file;
1854 instance->li_line = line;
1855 }
1856
1857 void
1858 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1859 {
1860 #ifdef INVARIANT_SUPPORT
1861 struct lock_instance *instance;
1862 struct lock_class *class;
1863
1864 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1865 return;
1866 class = LOCK_CLASS(lock);
1867 if ((class->lc_flags & LC_SLEEPLOCK) != 0)
1868 instance = find_instance(curthread->td_sleeplocks, lock);
1869 else if ((class->lc_flags & LC_SPINLOCK) != 0)
1870 instance = find_instance(PCPU_GET(spinlocks), lock);
1871 else {
1872 panic("Lock (%s) %s is not sleep or spin!",
1873 class->lc_name, lock->lo_name);
1874 }
1875 file = fixup_filename(file);
1876 switch (flags) {
1877 case LA_UNLOCKED:
1878 if (instance != NULL)
1879 panic("Lock (%s) %s locked @ %s:%d.",
1880 class->lc_name, lock->lo_name, file, line);
1881 break;
1882 case LA_LOCKED:
1883 case LA_LOCKED | LA_RECURSED:
1884 case LA_LOCKED | LA_NOTRECURSED:
1885 case LA_SLOCKED:
1886 case LA_SLOCKED | LA_RECURSED:
1887 case LA_SLOCKED | LA_NOTRECURSED:
1888 case LA_XLOCKED:
1889 case LA_XLOCKED | LA_RECURSED:
1890 case LA_XLOCKED | LA_NOTRECURSED:
1891 if (instance == NULL) {
1892 panic("Lock (%s) %s not locked @ %s:%d.",
1893 class->lc_name, lock->lo_name, file, line);
1894 break;
1895 }
1896 if ((flags & LA_XLOCKED) != 0 &&
1897 (instance->li_flags & LI_EXCLUSIVE) == 0)
1898 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1899 class->lc_name, lock->lo_name, file, line);
1900 if ((flags & LA_SLOCKED) != 0 &&
1901 (instance->li_flags & LI_EXCLUSIVE) != 0)
1902 panic("Lock (%s) %s exclusively locked @ %s:%d.",
1903 class->lc_name, lock->lo_name, file, line);
1904 if ((flags & LA_RECURSED) != 0 &&
1905 (instance->li_flags & LI_RECURSEMASK) == 0)
1906 panic("Lock (%s) %s not recursed @ %s:%d.",
1907 class->lc_name, lock->lo_name, file, line);
1908 if ((flags & LA_NOTRECURSED) != 0 &&
1909 (instance->li_flags & LI_RECURSEMASK) != 0)
1910 panic("Lock (%s) %s recursed @ %s:%d.",
1911 class->lc_name, lock->lo_name, file, line);
1912 break;
1913 default:
1914 panic("Invalid lock assertion at %s:%d.", file, line);
1915
1916 }
1917 #endif /* INVARIANT_SUPPORT */
1918 }
1919
1920 #ifdef DDB
1921 static void
1922 witness_list(struct thread *td)
1923 {
1924
1925 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1926 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
1927
1928 if (witness_watch == 0)
1929 return;
1930
1931 witness_list_locks(&td->td_sleeplocks);
1932
1933 /*
1934 * We only handle spinlocks if td == curthread. This is somewhat broken
1935 * if td is currently executing on some other CPU and holds spin locks
1936 * as we won't display those locks.  If we had an MI way of getting
1937 * the per-cpu data for a given cpu then we could use
1938 * td->td_oncpu to get the list of spinlocks for this thread
1939 * and "fix" this.
1940 *
1941 * That still wouldn't really fix this unless we locked sched_lock
1942 * or stopped the other CPU to make sure it wasn't changing the list
1943 * out from under us. It is probably best to just not try to handle
1944 * threads on other CPUs for now.
1945 */
1946 if (td == curthread && PCPU_GET(spinlocks) != NULL)
1947 witness_list_locks(PCPU_PTR(spinlocks));
1948 }
1949
1950 DB_SHOW_COMMAND(locks, db_witness_list)
1951 {
1952 struct thread *td;
1953 pid_t pid;
1954 struct proc *p;
1955
1956 if (have_addr) {
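/*
 * ddb parsed the argument as a hex number; re-read its digits as
 * decimal so that "show locks 1234" means pid 1234.
 */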
1957 pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1958 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1959 ((addr >> 16) % 16) * 10000;
1960 /* sx_slock(&allproc_lock); */
1961 FOREACH_PROC_IN_SYSTEM(p) {
1962 if (p->p_pid == pid)
1963 break;
1964 }
1965 /* sx_sunlock(&allproc_lock); */
1966 if (p == NULL) {
1967 db_printf("pid %d not found\n", pid);
1968 return;
1969 }
1970 FOREACH_THREAD_IN_PROC(p, td) {
1971 witness_list(td);
1972 }
1973 } else {
1974 td = curthread;
1975 witness_list(td);
1976 }
1977 }
1978
1979 DB_SHOW_COMMAND(alllocks, db_witness_list_all)
1980 {
1981 struct thread *td;
1982 struct proc *p;
1983
1984 /*
1985 * It would be nice to list only threads and processes that actually
1986 * held sleep locks, but that information is currently not exported
1987 * by WITNESS.
1988 */
1989 FOREACH_PROC_IN_SYSTEM(p) {
1990 if (!witness_proc_has_locks(p))
1991 continue;
1992 FOREACH_THREAD_IN_PROC(p, td) {
1993 if (!witness_thread_has_locks(td))
1994 continue;
1995 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
1996 p->p_comm, td, td->td_tid);
1997 witness_list(td);
1998 }
1999 }
2000 }
2001
2002 DB_SHOW_COMMAND(witness, db_witness_display)
2003 {
2004
2005 witness_display(db_printf);
2006 }
2007 #endif