1 /*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 * $FreeBSD: releng/5.0/sys/kern/subr_witness.c 108185 2002-12-22 16:33:39Z kris $
31 */
32
33 /*
34 * Implementation of the `witness' lock verifier. Originally implemented for
35 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
36 * classes in FreeBSD.
37 */
38
39 /*
40 * Main Entry: witness
41 * Pronunciation: 'wit-n&s
42 * Function: noun
43 * Etymology: Middle English witnesse, from Old English witnes knowledge,
44 * testimony, witness, from 2wit
45 * Date: before 12th century
46 * 1 : attestation of a fact or event : TESTIMONY
47 * 2 : one that gives evidence; specifically : one who testifies in
48 * a cause or before a judicial tribunal
49 * 3 : one asked to be present at a transaction so as to be able to
50 * testify to its having taken place
51 * 4 : one who has personal knowledge of something
52 * 5 a : something serving as evidence or proof : SIGN
53 * b : public affirmation by word or example of usually
54 * religious faith or conviction <the heroic witness to divine
55 * life -- Pilot>
56 * 6 capitalized : a member of the Jehovah's Witnesses
57 */
58
59 #include "opt_ddb.h"
60 #include "opt_witness.h"
61
62 #include <sys/param.h>
63 #include <sys/bus.h>
64 #include <sys/kernel.h>
65 #include <sys/ktr.h>
66 #include <sys/lock.h>
67 #include <sys/malloc.h>
68 #include <sys/mutex.h>
69 #include <sys/proc.h>
70 #include <sys/sysctl.h>
71 #include <sys/systm.h>
72
73 #include <ddb/ddb.h>
74
/* Define this to check for blessed mutexes */
#undef BLESSING

/* Maximum number of witness structures that can ever be allocated. */
#define WITNESS_COUNT 200
/* Total child-list entries shared among all witnesses. */
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 *
 * Note: the expansion is fully parenthesized so the macro behaves as a
 * single value in any expression it is used in.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)

/* Number of child pointers carried by each witness_child_list_entry. */
#define WITNESS_NCHILDREN 6
88
struct witness_child_list_entry;

/*
 * One witness describes one lock "type" (identified by name) and records
 * its place in the acquisition-order hierarchy together with the file and
 * line where a lock of this type was last acquired.
 */
struct witness {
	const char	*w_name;		/* Lock type name. */
	struct lock_class *w_class;		/* Class (sleep/spin, flags). */
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char	*w_file;	/* Last acquisition file (or "order list"). */
	int		w_line;		/* Last acquisition line. */
	u_int		w_level;	/* Hierarchy depth; see witness_levelall(). */
	u_int		w_refcount;	/* Number of lock objects enrolled here. */
	/* The *_squawked bits suppress repeated complaints about one witness. */
	u_char		w_Giant_squawked:1;
	u_char		w_other_squawked:1;
	u_char		w_same_squawked:1;
};
105
/*
 * A chunk of a witness's child list.  Children are kept in fixed-size
 * arrays chained through wcl_next so no per-child allocation is needed.
 */
struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;	/* Next chunk, or NULL. */
	struct witness *wcl_children[WITNESS_NCHILDREN];	/* Direct children. */
	u_int wcl_count;		/* Number of valid wcl_children slots. */
};
111
STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
/* A pair of lock names whose order reversals are deliberately ignored. */
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

/* One entry in the static lock-order table (order_lists below). */
struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};
125
/* Witness lookup/creation and hierarchy manipulation. */
static struct witness *enroll(const char *description,
				struct lock_class *lock_class);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
/* Hierarchy display and level computation. */
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
					   struct witness *);
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
/* Static-pool allocators for witnesses, child chunks, and lock lists. */
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
					     struct lock_object *lock);
#if defined(DDB)
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
				     struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
#endif
152
MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");

/* Master switch: clearing this tunable disables witness entirely. */
static int witness_watch = 1;
TUNABLE_INT("debug.witness_watch", &witness_watch);
SYSCTL_INT(_debug, OID_AUTO, witness_watch, CTLFLAG_RD, &witness_watch, 0, "");

#ifdef DDB
/*
 * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
 * drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_DDB
int	witness_ddb = 1;
#else
int	witness_ddb = 0;
#endif
TUNABLE_INT("debug.witness_ddb", &witness_ddb);
SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");
#endif /* DDB */

/* If set, spin locks are never enrolled and thus escape all checking. */
#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
    "");

static struct mtx w_mtx;		/* Protects all the witness lists below. */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;
static int witness_dead;	/* fatal error, probably no memory */

/* Static backing storage; witness never allocates after boot. */
static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
/*
 * The hard-coded lock order.  Within each NULL-terminated run, every entry
 * must be acquired before the entries that follow it (witness_initialize()
 * chains each entry as a child of its predecessor).  A single NULL entry
 * ends one run; two consecutive NULL entries end the whole table.
 */
static struct witness_order_list_entry order_lists[] = {
	{ "Giant", &lock_class_mtx_sleep },
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#ifdef __i386__
	{ "com", &lock_class_mtx_spin },
#endif
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "sabtty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "ithread table lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) && defined(APIC_IO)
	{ "tlb", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "zombie_thread_lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
	{ NULL, NULL },
	{ NULL, NULL }
};
251
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 * (currently empty; add witness_blessed entries to exempt a pair).
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
262
263 /*
264 * List of all locks in the system.
265 */
266 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
267
268 static struct mtx all_mtx = {
269 { &lock_class_mtx_sleep, /* mtx_object.lo_class */
270 "All locks list", /* mtx_object.lo_name */
271 "All locks list", /* mtx_object.lo_type */
272 LO_INITIALIZED, /* mtx_object.lo_flags */
273 { NULL, NULL }, /* mtx_object.lo_list */
274 NULL }, /* mtx_object.lo_witness */
275 MTX_UNOWNED, 0, /* mtx_lock, mtx_recurse */
276 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
277 { NULL, NULL } /* mtx_contested */
278 };
279
280 /*
281 * This global is set to 0 once it becomes safe to use the witness code.
282 */
283 static int witness_cold = 1;
284
285 /*
286 * Global variables for book keeping.
287 */
288 static int lock_cur_cnt;
289 static int lock_max_cnt;
290
291 /*
292 * The WITNESS-enabled diagnostic code.
293 */
/*
 * The WITNESS-enabled diagnostic code.
 */
/*
 * One-time setup, run from SYSINIT at SI_SUB_WITNESS: seed the static
 * pools, build the hierarchy from order_lists, and enroll every lock
 * that was initialized before this point.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	/* Populate the free pools from the static arrays. */
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		/*
		 * Chain each subsequent entry as a child of the previous
		 * one until the NULL terminating this run; the outer
		 * loop's order++ then steps past that NULL.
		 */
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
353
354 void
355 witness_init(struct lock_object *lock)
356 {
357 struct lock_class *class;
358
359 class = lock->lo_class;
360 if (lock->lo_flags & LO_INITIALIZED)
361 panic("%s: lock (%s) %s is already initialized", __func__,
362 class->lc_name, lock->lo_name);
363 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
364 (class->lc_flags & LC_RECURSABLE) == 0)
365 panic("%s: lock (%s) %s can not be recursable", __func__,
366 class->lc_name, lock->lo_name);
367 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
368 (class->lc_flags & LC_SLEEPABLE) == 0)
369 panic("%s: lock (%s) %s can not be sleepable", __func__,
370 class->lc_name, lock->lo_name);
371 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
372 (class->lc_flags & LC_UPGRADABLE) == 0)
373 panic("%s: lock (%s) %s can not be upgradable", __func__,
374 class->lc_name, lock->lo_name);
375
376 mtx_lock(&all_mtx);
377 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
378 lock->lo_flags |= LO_INITIALIZED;
379 lock_cur_cnt++;
380 if (lock_cur_cnt > lock_max_cnt)
381 lock_max_cnt = lock_cur_cnt;
382 mtx_unlock(&all_mtx);
383 if (!witness_cold && !witness_dead && panicstr == NULL &&
384 (lock->lo_flags & LO_WITNESS) != 0)
385 lock->lo_witness = enroll(lock->lo_type, class);
386 else
387 lock->lo_witness = NULL;
388 }
389
390 void
391 witness_destroy(struct lock_object *lock)
392 {
393 struct witness *w;
394
395 if (witness_cold)
396 panic("lock (%s) %s destroyed while witness_cold",
397 lock->lo_class->lc_name, lock->lo_name);
398 if ((lock->lo_flags & LO_INITIALIZED) == 0)
399 panic("%s: lock (%s) %s is not initialized", __func__,
400 lock->lo_class->lc_name, lock->lo_name);
401
402 /* XXX: need to verify that no one holds the lock */
403 w = lock->lo_witness;
404 if (w != NULL) {
405 mtx_lock_spin(&w_mtx);
406 MPASS(w->w_refcount > 0);
407 w->w_refcount--;
408 mtx_unlock_spin(&w_mtx);
409 }
410
411 mtx_lock(&all_mtx);
412 lock_cur_cnt--;
413 TAILQ_REMOVE(&all_locks, lock, lo_list);
414 lock->lo_flags &= ~LO_INITIALIZED;
415 mtx_unlock(&all_mtx);
416 }
417
418 #if defined(DDB)
419 static void
420 witness_display_list(void(*prnt)(const char *fmt, ...),
421 struct witness_list *list)
422 {
423 struct witness *w, *w1;
424 int found;
425
426 STAILQ_FOREACH(w, list, w_typelist) {
427 if (w->w_file == NULL)
428 continue;
429 found = 0;
430 STAILQ_FOREACH(w1, list, w_typelist) {
431 if (isitmychild(w1, w)) {
432 found++;
433 break;
434 }
435 }
436 if (found)
437 continue;
438 /*
439 * This lock has no anscestors, display its descendants.
440 */
441 witness_displaydescendants(prnt, w);
442 }
443 }
444
445 static void
446 witness_display(void(*prnt)(const char *fmt, ...))
447 {
448 struct witness *w;
449
450 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
451 witness_levelall();
452
453 /*
454 * First, handle sleep locks which have been acquired at least
455 * once.
456 */
457 prnt("Sleep locks:\n");
458 witness_display_list(prnt, &w_sleep);
459
460 /*
461 * Now do spin locks which have been acquired at least once.
462 */
463 prnt("\nSpin locks:\n");
464 witness_display_list(prnt, &w_spin);
465
466 /*
467 * Finally, any locks which have not been acquired yet.
468 */
469 prnt("\nLocks which were never acquired:\n");
470 STAILQ_FOREACH(w, &w_all, w_list) {
471 if (w->w_file != NULL || w->w_refcount == 0)
472 continue;
473 prnt("%s\n", w->w_name);
474 }
475 }
476 #endif
477
/*
 * Main order-checking entry point, called just after a lock is acquired.
 * Records the acquisition in the per-thread (sleep) or per-CPU (spin)
 * lock list and checks it against the known hierarchy, complaining (and
 * optionally dropping into DDB) about order reversals, recursion on
 * non-recursive locks, and mixed shared/exclusive recursion.
 */
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;
#ifdef DDB
	int go_into_ddb = 0;
#endif /* DDB */

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		lock_list = &td->td_sleeplocks;
	} else
		lock_list = PCPU_PTR(spinlocks);

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		goto out;

	/*
	 * Is this the first lock acquired?  If so, then no order checking
	 * is needed.
	 */
	if (*lock_list == NULL)
		goto out;

	/*
	 * Check to see if we are recursing on a lock we already own.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		/* Recursion count lives in the LI_RECURSEMASK bits. */
		lock1->li_flags++;
		if ((lock->lo_flags & LO_RECURSABLE) == 0) {
			printf(
			"recursed on non-recursive lock (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("first acquired @ %s:%d\n", lock1->li_file,
			    lock1->li_line);
			panic("recurse");
		}
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    lock1->li_flags & LI_RECURSEMASK);
		lock1->li_file = file;
		lock1->li_line = line;
		return;
	}

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			goto out;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
			lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef DDB
		go_into_ddb = 1;
#endif /* DDB */
		goto out;
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	/* The known order w1 -> w is being followed; nothing to report. */
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	/*
	 * Walk every lock currently held (newest first) and look for one
	 * that the new lock is a known ancestor of -- that is a reversal.
	 */
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and we slept with this
			 * lock, then skip it.
			 */
			if ((lock1->li_flags & LI_SLEPT) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable and isn't Giant, we want to treat
			 * it as a lock order violation to enforce a general
			 * lock order of sleepable locks before non-sleepable
			 * locks.  Thus, we only bother checking the lock
			 * order hierarchy if we pass the initial test.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock1->li_lock != &Giant.mtx_object)) &&
			    !isitmydescendant(w, w1))
				continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			if (blessed(w, w1))
				goto out;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					goto out;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					goto out;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				i--;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				}
			} while (i >= 0);
			if (i < 0) {
				/* Simple two-lock reversal. */
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				/* Three-lock chain: show the earlier hold too. */
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef DDB
			go_into_ddb = 1;
#endif /* DDB */
			goto out;
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * Don't build a new relationship if we are locking Giant just
	 * after waking up and the previous lock in the list was acquired
	 * prior to blocking.
	 */
	if (lock == &Giant.mtx_object && (lock1->li_flags & LI_SLEPT) != 0)
		mtx_unlock_spin(&w_mtx);
	else {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		/* itismychild() drops w_mtx itself on failure paths. */
		if (!itismychild(lock1->li_lock->lo_witness, w))
			mtx_unlock_spin(&w_mtx);
	}

out:
#ifdef DDB
	if (witness_ddb && go_into_ddb)
		Debugger(__func__);
#endif /* DDB */
	/* Remember the most recent acquisition site for this lock type. */
	w->w_file = file;
	w->w_line = line;

	/* Record the new lock instance, growing the list if needed. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	lock1 = &lle->ll_children[lle->ll_count++];
	lock1->li_lock = lock;
	lock1->li_line = line;
	lock1->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		lock1->li_flags = LI_EXCLUSIVE;
	else
		lock1->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
743
744 void
745 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
746 {
747 struct lock_instance *instance;
748 struct lock_class *class;
749
750 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
751 if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
752 return;
753 class = lock->lo_class;
754 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
755 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
756 class->lc_name, lock->lo_name, file, line);
757 if ((flags & LOP_TRYLOCK) == 0)
758 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
759 lock->lo_name, file, line);
760 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
761 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
762 class->lc_name, lock->lo_name, file, line);
763 instance = find_instance(curthread->td_sleeplocks, lock);
764 if (instance == NULL)
765 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
766 class->lc_name, lock->lo_name, file, line);
767 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
768 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
769 class->lc_name, lock->lo_name, file, line);
770 if ((instance->li_flags & LI_RECURSEMASK) != 0)
771 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
772 class->lc_name, lock->lo_name,
773 instance->li_flags & LI_RECURSEMASK, file, line);
774 instance->li_flags |= LI_EXCLUSIVE;
775 }
776
777 void
778 witness_downgrade(struct lock_object *lock, int flags, const char *file,
779 int line)
780 {
781 struct lock_instance *instance;
782 struct lock_class *class;
783
784 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
785 if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
786 return;
787 class = lock->lo_class;
788 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
789 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
790 class->lc_name, lock->lo_name, file, line);
791 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
792 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
793 class->lc_name, lock->lo_name, file, line);
794 instance = find_instance(curthread->td_sleeplocks, lock);
795 if (instance == NULL)
796 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
797 class->lc_name, lock->lo_name, file, line);
798 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
799 panic("downgrade of shared lock (%s) %s @ %s:%d",
800 class->lc_name, lock->lo_name, file, line);
801 if ((instance->li_flags & LI_RECURSEMASK) != 0)
802 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
803 class->lc_name, lock->lo_name,
804 instance->li_flags & LI_RECURSEMASK, file, line);
805 instance->li_flags &= ~LI_EXCLUSIVE;
806 }
807
/*
 * Called just before a lock is released: find its instance on the
 * thread's (sleep) or CPU's (spin) lock list, validate the shared vs.
 * exclusive mode of the release, and remove the instance (or just drop
 * one recursion level).  Panics if the lock is not found on the list.
 */
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	/* Search every list chunk for the instance of this lock. */
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock) {
				/* The release mode must match the hold mode. */
				if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
				    (flags & LOP_EXCLUSIVE) == 0) {
					printf(
					"shared unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while exclusively locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("excl->ushare");
				}
				if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
				    (flags & LOP_EXCLUSIVE) != 0) {
					printf(
					"exclusive unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while share locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("share->uexcl");
				}
				/* If we are recursed, unrecurse. */
				if ((instance->li_flags & LI_RECURSEMASK) > 0) {
					CTR4(KTR_WITNESS,
				"%s: pid %d unrecursed on %s r=%d", __func__,
					    td->td_proc->p_pid,
					    instance->li_lock->lo_name,
					    instance->li_flags);
					instance->li_flags--;
					return;
				}
				/*
				 * Shuffle the remaining instances down with
				 * interrupts disabled so an interrupt does
				 * not see a half-updated list.
				 */
				s = intr_disable();
				CTR4(KTR_WITNESS,
				"%s: pid %d removed %s from lle[%d]", __func__,
				    td->td_proc->p_pid,
				    instance->li_lock->lo_name,
				    (*lock_list)->ll_count - 1);
				for (j = i; j < (*lock_list)->ll_count - 1; j++)
					(*lock_list)->ll_children[j] =
					    (*lock_list)->ll_children[j + 1];
				(*lock_list)->ll_count--;
				intr_restore(s);
				/* Free the chunk once it is empty. */
				if ((*lock_list)->ll_count == 0) {
					lle = *lock_list;
					*lock_list = lle->ll_next;
					CTR3(KTR_WITNESS,
					    "%s: pid %d removed lle %p", __func__,
					    td->td_proc->p_pid, lle);
					witness_lock_list_free(lle);
				}
				return;
			}
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
}
890
891 /*
892 * Warn if any held locks are not sleepable. Note that Giant and the lock
893 * passed in are both special cases since they are both released during the
894 * sleep process and aren't actually held while the thread is asleep.
895 */
896 int
897 witness_sleep(int check_only, struct lock_object *lock, const char *file,
898 int line)
899 {
900 struct lock_list_entry **lock_list, *lle;
901 struct lock_instance *lock1;
902 struct thread *td;
903 int i, n;
904
905 if (witness_cold || witness_dead || panicstr != NULL)
906 return (0);
907 n = 0;
908 td = curthread;
909 lock_list = &td->td_sleeplocks;
910 again:
911 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
912 for (i = lle->ll_count - 1; i >= 0; i--) {
913 lock1 = &lle->ll_children[i];
914 if (lock1->li_lock == lock ||
915 lock1->li_lock == &Giant.mtx_object)
916 continue;
917 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) {
918 if (check_only == 0) {
919 CTR3(KTR_WITNESS,
920 "pid %d: sleeping with lock (%s) %s held",
921 td->td_proc->p_pid,
922 lock1->li_lock->lo_class->lc_name,
923 lock1->li_lock->lo_name);
924 lock1->li_flags |= LI_SLEPT;
925 }
926 continue;
927 }
928 n++;
929 printf("%s:%d: %s with \"%s\" locked from %s:%d\n",
930 file, line, check_only ? "could sleep" : "sleeping",
931 lock1->li_lock->lo_name, lock1->li_file,
932 lock1->li_line);
933 }
934 if (lock_list == &td->td_sleeplocks && PCPU_GET(spinlocks) != NULL) {
935 /*
936 * Since we already hold a spinlock preemption is
937 * already blocked.
938 */
939 lock_list = PCPU_PTR(spinlocks);
940 goto again;
941 }
942 #ifdef DDB
943 if (witness_ddb && n)
944 Debugger(__func__);
945 #endif /* DDB */
946 return (n);
947 }
948
949 const char *
950 witness_file(struct lock_object *lock)
951 {
952 struct witness *w;
953
954 if (witness_cold || witness_dead || lock->lo_witness == NULL)
955 return ("?");
956 w = lock->lo_witness;
957 return (w->w_file);
958 }
959
960 int
961 witness_line(struct lock_object *lock)
962 {
963 struct witness *w;
964
965 if (witness_cold || witness_dead || lock->lo_witness == NULL)
966 return (0);
967 w = lock->lo_witness;
968 return (w->w_line);
969 }
970
/*
 * Find or create the witness for (description, lock_class).  Bumps the
 * refcount on an existing match; otherwise allocates a new witness and
 * links it onto the global and per-type lists.  Returns NULL when
 * witness is disabled, dead, panicking, or when spin locks are being
 * skipped.  Drops w_mtx on every return path.
 */
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (!witness_watch || witness_dead || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * Pointer equality catches the common case of a shared
		 * string literal; fall back to strcmp for live witnesses.
		 */
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still 0 while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	/* Note: witness_get() drops w_mtx when it returns NULL. */
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK)
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist)
	else if (lock_class->lc_flags & LC_SLEEPLOCK)
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
	else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}
1020
/*
 * Record that "child" ranks after "parent" in the lock order graph.
 * Returns 1 when the edge could not be added (child-list pool
 * exhausted), 0 otherwise.  After adding the edge, the whole graph is
 * pruned of redundant direct links and lock levels are recomputed.
 */
static int
itismychild(struct witness *parent, struct witness *child)
{
	static int recursed;
	struct witness_child_list_entry **wcl;
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	/* An order edge may only connect locks of the same type. */
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	/*
	 * Insert "child" after "parent": walk to the first chunk of the
	 * child list with a free slot, allocating a new chunk if all
	 * existing chunks are full.
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (1);
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	/*
	 * Now prune whole tree.  We look for cases where a lock is now
	 * both a descendant and a direct child of a given lock.  In that
	 * case, we want to remove the direct child link from the tree.
	 *
	 * The static "recursed" flag keeps the recursive itismychild()
	 * call below from kicking off a nested pruning pass.
	 */
	if (recursed)
		return (0);
	recursed = 1;
	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	/* Note: parent and child are reused as iterators from here on. */
	STAILQ_FOREACH(child, list, w_typelist) {
		STAILQ_FOREACH(parent, list, w_typelist) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			/* Child no longer reachable; restore the edge. */
			itismychild(parent, child);
		}
	}
	recursed = 0;
	witness_levelall();
	return (0);
}
1075
/*
 * Remove the direct parent -> child edge, if present.  The last entry
 * in the containing chunk is moved into the vacated slot to keep the
 * array dense, and a chunk that becomes empty is unlinked and freed.
 */
static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	/* Locate the chunk (*wcl) and slot (i) holding the child. */
	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	/* Fill the hole with the chunk's last entry. */
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	/* Chunk is now empty; unlink and return it to the free list. */
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	witness_child_free(wcl1);
}
1099
1100 static int
1101 isitmychild(struct witness *parent, struct witness *child)
1102 {
1103 struct witness_child_list_entry *wcl;
1104 int i;
1105
1106 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1107 for (i = 0; i < wcl->wcl_count; i++) {
1108 if (wcl->wcl_children[i] == child)
1109 return (1);
1110 }
1111 }
1112 return (0);
1113 }
1114
1115 static int
1116 isitmydescendant(struct witness *parent, struct witness *child)
1117 {
1118 struct witness_child_list_entry *wcl;
1119 int i, j;
1120
1121 if (isitmychild(parent, child))
1122 return (1);
1123 j = 0;
1124 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1125 MPASS(j < 1000);
1126 for (i = 0; i < wcl->wcl_count; i++) {
1127 if (isitmydescendant(wcl->wcl_children[i], child))
1128 return (1);
1129 }
1130 j++;
1131 }
1132 return (0);
1133 }
1134
1135 static void
1136 witness_levelall (void)
1137 {
1138 struct witness_list *list;
1139 struct witness *w, *w1;
1140
1141 /*
1142 * First clear all levels.
1143 */
1144 STAILQ_FOREACH(w, &w_all, w_list) {
1145 w->w_level = 0;
1146 }
1147
1148 /*
1149 * Look for locks with no parent and level all their descendants.
1150 */
1151 STAILQ_FOREACH(w, &w_all, w_list) {
1152 /*
1153 * This is just an optimization, technically we could get
1154 * away just walking the all list each time.
1155 */
1156 if (w->w_class->lc_flags & LC_SLEEPLOCK)
1157 list = &w_sleep;
1158 else
1159 list = &w_spin;
1160 STAILQ_FOREACH(w1, list, w_typelist) {
1161 if (isitmychild(w1, w))
1162 goto skip;
1163 }
1164 witness_leveldescendents(w, 0);
1165 skip:
1166 ; /* silence GCC 3.x */
1167 }
1168 }
1169
1170 static void
1171 witness_leveldescendents(struct witness *parent, int level)
1172 {
1173 struct witness_child_list_entry *wcl;
1174 int i;
1175
1176 if (parent->w_level < level)
1177 parent->w_level = level;
1178 level++;
1179 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1180 for (i = 0; i < wcl->wcl_count; i++)
1181 witness_leveldescendents(wcl->wcl_children[i], level);
1182 }
1183
1184 static void
1185 witness_displaydescendants(void(*prnt)(const char *fmt, ...),
1186 struct witness *parent)
1187 {
1188 struct witness_child_list_entry *wcl;
1189 int i, level;
1190
1191 level = parent->w_level;
1192 prnt("%-2d", level);
1193 for (i = 0; i < level; i++)
1194 prnt(" ");
1195 if (parent->w_refcount > 0) {
1196 prnt("%s", parent->w_name);
1197 if (parent->w_file != NULL)
1198 prnt(" -- last acquired @ %s:%d\n", parent->w_file,
1199 parent->w_line);
1200 } else
1201 prnt("(dead)\n");
1202 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1203 for (i = 0; i < wcl->wcl_count; i++)
1204 witness_displaydescendants(prnt,
1205 wcl->wcl_children[i]);
1206 }
1207
1208 #ifdef BLESSING
1209 static int
1210 blessed(struct witness *w1, struct witness *w2)
1211 {
1212 int i;
1213 struct witness_blessed *b;
1214
1215 for (i = 0; i < blessed_count; i++) {
1216 b = &blessed_list[i];
1217 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1218 if (strcmp(w2->w_name, b->b_lock2) == 0)
1219 return (1);
1220 continue;
1221 }
1222 if (strcmp(w1->w_name, b->b_lock2) == 0)
1223 if (strcmp(w2->w_name, b->b_lock1) == 0)
1224 return (1);
1225 }
1226 return (0);
1227 }
1228 #endif
1229
/*
 * Allocate a zeroed witness from the static free pool.  The caller is
 * expected to hold w_mtx (see enroll()); on failure this routine drops
 * w_mtx and returns NULL, on success w_mtx remains held.  Exhausting
 * the pool permanently disables witness (witness_dead).
 */
static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_dead) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		/* Pool exhausted; turn witness off for good. */
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	bzero(w, sizeof(*w));
	return (w);
}
1250
/*
 * Return a witness to the free pool.  NOTE(review): presumably the
 * caller must hold w_mtx, mirroring witness_get() -- confirm at the
 * call sites.
 */
static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
}
1257
/*
 * Allocate a zeroed child-list chunk from the static free pool.  Like
 * witness_get(), the caller is expected to hold w_mtx; on failure this
 * routine drops w_mtx and returns NULL, and exhausting the pool
 * permanently disables witness (witness_dead).
 */
static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_dead) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		/* Pool exhausted; turn witness off for good. */
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}
1278
/*
 * Return a child-list chunk to the free pool.  NOTE(review): presumably
 * the caller must hold w_mtx, mirroring witness_child_get() -- confirm
 * at the call sites.
 */
static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
}
1286
/*
 * Allocate a zeroed lock list entry from the free pool.  Unlike
 * witness_get()/witness_child_get(), this routine acquires and releases
 * w_mtx itself.  Returns NULL (and permanently disables witness) when
 * the pool is exhausted.
 */
static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	/* Unlocked read: once dead, witness stays dead. */
	if (witness_dead)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}
1307
/*
 * Return a lock list entry to the free pool.  Acquires w_mtx itself,
 * so the caller must not already hold it.
 */
static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}
1317
1318 static struct lock_instance *
1319 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1320 {
1321 struct lock_list_entry *lle;
1322 struct lock_instance *instance;
1323 int i;
1324
1325 for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1326 for (i = lle->ll_count - 1; i >= 0; i--) {
1327 instance = &lle->ll_children[i];
1328 if (instance->li_lock == lock)
1329 return (instance);
1330 }
1331 return (NULL);
1332 }
1333
1334 int
1335 witness_list_locks(struct lock_list_entry **lock_list)
1336 {
1337 struct lock_list_entry *lle;
1338 struct lock_instance *instance;
1339 struct lock_object *lock;
1340 int i, nheld;
1341
1342 nheld = 0;
1343 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1344 for (i = lle->ll_count - 1; i >= 0; i--) {
1345 instance = &lle->ll_children[i];
1346 lock = instance->li_lock;
1347 printf("%s %s %s",
1348 (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1349 "exclusive" : "shared",
1350 lock->lo_class->lc_name, lock->lo_name);
1351 if (lock->lo_type != lock->lo_name)
1352 printf(" (%s)", lock->lo_type);
1353 printf(" r = %d (%p) locked @ %s:%d\n",
1354 instance->li_flags & LI_RECURSEMASK, lock,
1355 instance->li_file, instance->li_line);
1356 nheld++;
1357 }
1358 return (nheld);
1359 }
1360
/*
 * List all sleep locks (and, for curthread, spin locks) held by the
 * given thread and return the count.
 *
 * Calling this on td != curthread is bad unless we are in ddb.
 */
int
witness_list(struct thread *td)
{
	int nheld;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
#ifdef DDB
	KASSERT(td == curthread || db_active,
	    ("%s: td != curthread and we aren't in the debugger", __func__));
	/* From the debugger, keep listing locks even once witness is dead. */
	if (!db_active && witness_dead)
		return (0);
#else
	KASSERT(td == curthread, ("%s: p != curthread", __func__));
	if (witness_dead)
		return (0);
#endif
	nheld = witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPU's for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		nheld += witness_list_locks(PCPU_PTR(spinlocks));

	return (nheld);
}
1400
1401 void
1402 witness_save(struct lock_object *lock, const char **filep, int *linep)
1403 {
1404 struct lock_instance *instance;
1405
1406 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1407 if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
1408 return;
1409 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1410 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1411 lock->lo_class->lc_name, lock->lo_name);
1412 instance = find_instance(curthread->td_sleeplocks, lock);
1413 if (instance == NULL)
1414 panic("%s: lock (%s) %s not locked", __func__,
1415 lock->lo_class->lc_name, lock->lo_name);
1416 *filep = instance->li_file;
1417 *linep = instance->li_line;
1418 }
1419
1420 void
1421 witness_restore(struct lock_object *lock, const char *file, int line)
1422 {
1423 struct lock_instance *instance;
1424
1425 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1426 if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
1427 return;
1428 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1429 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1430 lock->lo_class->lc_name, lock->lo_name);
1431 instance = find_instance(curthread->td_sleeplocks, lock);
1432 if (instance == NULL)
1433 panic("%s: lock (%s) %s not locked", __func__,
1434 lock->lo_class->lc_name, lock->lo_name);
1435 lock->lo_witness->w_file = file;
1436 lock->lo_witness->w_line = line;
1437 instance->li_file = file;
1438 instance->li_line = line;
1439 }
1440
/*
 * Assert that a lock is held (or not held) in the manner described by
 * the LA_* flags, panicking with the caller's file/line on violation.
 * Compiles to a no-op unless INVARIANT_SUPPORT is defined.
 */
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;

	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	/* Look the lock up on the held-locks list matching its type. */
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    lock->lo_class->lc_name, lock->lo_name);
		return;
	}
	/* Only the exact flag combinations below are valid assertions. */
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
			break;
		}
		/* Check exclusivity and recursion against the request. */
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);

	}
#endif /* INVARIANT_SUPPORT */
}
1501
1502 #ifdef DDB
1503
/*
 * DDB "show locks [pid]" command: list the locks held by every thread
 * of the process with the given pid, or by curthread when no argument
 * is supplied.
 */
DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;
	pid_t pid;
	struct proc *p;

	if (have_addr) {
		/*
		 * DDB parses the argument as hex, but the user typed a
		 * decimal pid, so reinterpret each hex nibble of addr as
		 * one decimal digit (handles pids up to 5 digits).
		 */
		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
		    ((addr >> 16) % 16) * 10000;
		/*
		 * NOTE(review): the allproc lock is deliberately left
		 * commented out here -- presumably because taking locks
		 * from inside the debugger is unsafe; confirm before
		 * re-enabling.
		 */
		/* sx_slock(&allproc_lock); */
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_pid == pid)
				break;
		}
		/* sx_sunlock(&allproc_lock); */
		if (p == NULL) {
			db_printf("pid %d not found\n", pid);
			return;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			witness_list(td);
		}
	} else {
		td = curthread;
		witness_list(td);
	}
}
1532
/*
 * DDB "show witness" command: dump the entire lock order graph to the
 * debugger console.
 */
DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
1538 #endif