1 /*-
2 * Copyright (c) 2004 Robert N. M. Watson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/6.4/sys/i386/i386/mp_watchdog.c 142723 2005-02-27 22:34:07Z pjd $
27 */
28
29 #include "opt_mp_watchdog.h"
30 #include "opt_sched.h"
31
32 #ifdef SCHED_ULE
33 #error MP_WATCHDOG cannot currently be used with SCHED_ULE
34 #endif
35
36 #include <sys/param.h>
37 #include <sys/kdb.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/pcpu.h>
42 #include <sys/proc.h>
43 #include <sys/sysctl.h>
44 #include <sys/systm.h>
45
46 #include <machine/smp.h>
47 #include <machine/apicreg.h>
48 #include <machine/apicvar.h>
49 #include <machine/mp_watchdog.h>
50
51 /*
52 * mp_watchdog hijacks the idle thread on a specified CPU, prevents new work
53 * from being scheduled there, and uses it as a "watchdog" to detect kernel
54 * failure on other CPUs. This is made reasonable by inclusion of logical
55 * processors in Xeon hardware. The watchdog is configured by setting the
 * debug.watchdog sysctl/tunable to the CPU of interest. A callout will then
 * begin executing, resetting a timer that is gradually lowered by the watching
 * thread. If the timer reaches 0, the watchdog fires by either dropping
 * directly to the debugger, or by sending an NMI IPI to the boot processor.
60 * This is a somewhat less efficient substitute for dedicated watchdog
61 * hardware, but can be quite an effective tool for debugging hangs.
62 *
63 * XXXRW: This should really use the watchdog(9)/watchdog(4) framework, but
64 * doesn't yet.
65 */
66 static int watchdog_cpu = -1;
67 static int watchdog_dontfire = 1;
68 static int watchdog_timer = -1;
69 static int watchdog_nmi = 1;
70
71 TUNABLE_INT("debug.watchdog", &watchdog_cpu);
72 SYSCTL_INT(_debug, OID_AUTO, watchdog_nmi, CTLFLAG_RW, &watchdog_nmi, 0,
73 "IPI the boot processor with an NMI to enter the debugger");
74
75 static struct callout watchdog_callout;
76
77 static void watchdog_change(int wdcpu);
78
79 /*
80 * Number of seconds before the watchdog will fire if the callout fails to
81 * reset the timer.
82 */
83 #define WATCHDOG_THRESHOLD 10
84
85 static void
86 watchdog_init(void *arg)
87 {
88
89 callout_init(&watchdog_callout, CALLOUT_MPSAFE);
90 if (watchdog_cpu != -1)
91 watchdog_change(watchdog_cpu);
92 }
93
94 /*
95 * This callout resets a timer until the watchdog kicks in. It acquires some
 * critical locks to make sure things haven't gotten wedged with those locks
97 * held.
98 */
static void
watchdog_function(void *arg)
{

	/*
	 * Since the timer ran, we must not be wedged. Acquire some critical
	 * locks to make sure. Then reset the timer.
	 *
	 * Lock order matters here: Giant is taken before the sched_lock spin
	 * mutex, matching the rest of the kernel's lock ordering. A hang
	 * while holding either lock prevents this callout from rearming the
	 * timer, which is exactly what lets the watchdog detect it.
	 */
	mtx_lock(&Giant);
	mtx_lock_spin(&sched_lock);
	/* Rearm; ap_watchdog() on the watchdog CPU counts this back down. */
	watchdog_timer = WATCHDOG_THRESHOLD;
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&Giant);
	/* Reschedule ourselves to run again in one second. */
	callout_reset(&watchdog_callout, 1 * hz, watchdog_function, NULL);
}
/* Register watchdog_init() to run once drivers are up. */
SYSINIT(watchdog_init, SI_SUB_DRIVERS, SI_ORDER_ANY, watchdog_init, NULL);
115
116 static void
117 watchdog_change(int wdcpu)
118 {
119
120 if (wdcpu == -1 || wdcpu == 0xffffffff) {
121 /*
122 * Disable the watchdog.
123 */
124 watchdog_cpu = -1;
125 watchdog_dontfire = 1;
126 callout_stop(&watchdog_callout);
127 printf("watchdog stopped\n");
128 } else {
129 watchdog_timer = WATCHDOG_THRESHOLD;
130 watchdog_dontfire = 0;
131 watchdog_cpu = wdcpu;
132 callout_reset(&watchdog_callout, 1 * hz, watchdog_function,
133 NULL);
134 }
135 }
136
137 /*
138 * This sysctl sets which CPU is the watchdog CPU. Set to -1 or 0xffffffff
139 * to disable the watchdog.
140 */
141 static int
142 sysctl_watchdog(SYSCTL_HANDLER_ARGS)
143 {
144 int error, temp;
145
146 temp = watchdog_cpu;
147 error = sysctl_handle_int(oidp, &temp, 0, req);
148 if (error)
149 return (error);
150
151 if (req->newptr != NULL)
152 watchdog_change(temp);
153 return (0);
154 }
155 SYSCTL_PROC(_debug, OID_AUTO, watchdog, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
156 sysctl_watchdog, "I", "");
157
158 /*
 * A badly behaved sysctl that leaks the sched lock when written to. It then
 * spins holding it just to make matters worse. This can be used to test the
 * effectiveness of the watchdog by generating a fairly hard and nasty hang.
162 * Note that Giant is also held in the current world order when we get here.
163 */
164 static int
165 sysctl_leak_schedlock(SYSCTL_HANDLER_ARGS)
166 {
167 int error, temp;
168
169 temp = 0;
170 error = sysctl_handle_int(oidp, &temp, 0, req);
171 if (error)
172 return (error);
173
174 if (req->newptr != NULL) {
175 if (temp) {
176 printf("Leaking the sched lock...\n");
177 mtx_lock_spin(&sched_lock);
178 while (1);
179 }
180 }
181 return (0);
182 }
183 SYSCTL_PROC(_debug, OID_AUTO, leak_schedlock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
184 sysctl_leak_schedlock, "IU", "");
185
186 /*
187 * Drop into the debugger by sending an IPI NMI to the boot processor.
188 */
static void
watchdog_ipi_nmi(void)
{

	/*
	 * Deliver NMI to the boot processor. Why not?
	 *
	 * Edge-triggered, physical-destination, NMI delivery mode aimed at
	 * boot_cpu_id; the NMI drops the BP into the debugger/trap handler
	 * regardless of its interrupt state.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_NMI,
	    boot_cpu_id);
	/* NOTE(review): -1 presumably means "wait indefinitely" for the IPI
	 * to be accepted — confirm against lapic_ipi_wait(). */
	lapic_ipi_wait(-1);
}
201
202 /*
203 * ap_watchdog() is called by the SMP idle loop code. It works on the same
204 * premise that the disabling of logical processors does: that if the cpu is
205 * idle, then it can ignore the world from then on, as nothing will be
206 * scheduled on it. Leaving aside multi-runqueue schedulers (SCHED_ULE) and
207 * explicit process migration (sched_bind()), this is not an unreasonable
208 * assumption.
209 */
void
ap_watchdog(u_int cpuid)
{
	char old_pcomm[MAXCOMLEN + 1];
	struct proc *p;

	/* Only the CPU nominated via debug.watchdog becomes the watchdog. */
	if (watchdog_cpu != cpuid)
		return;

	printf("watchdog started on cpu %d\n", cpuid);
	p = curproc;
	/* Save the process name and advertise watchdog duty in ps(1). */
	bcopy(p->p_comm, old_pcomm, MAXCOMLEN + 1);
	snprintf(p->p_comm, MAXCOMLEN + 1, "mp_watchdog cpu %d", cpuid);
	while (1) {
		DELAY(1000000);	/* One second. */
		/* Stop watching if the watchdog was moved or disabled. */
		if (watchdog_cpu != cpuid)
			break;
		/*
		 * Count the timer down; watchdog_function() resets it to
		 * WATCHDOG_THRESHOLD each second while the system is healthy,
		 * so it only reaches 0 when callouts have stopped running.
		 */
		atomic_subtract_int(&watchdog_timer, 1);
		if (watchdog_timer < 4)
			printf("Watchdog timer: %d\n", watchdog_timer);
		if (watchdog_timer == 0 && watchdog_dontfire == 0) {
			printf("Watchdog firing!\n");
			/* Fire at most once per arming. */
			watchdog_dontfire = 1;
			if (watchdog_nmi)
				watchdog_ipi_nmi();
			else
				kdb_enter("mp_watchdog");
		}
	}
	/* Watchdog reassigned or disabled: restore the name and return to
	 * the idle loop. */
	bcopy(old_pcomm, p->p_comm, MAXCOMLEN + 1);
	printf("watchdog stopped on cpu %d\n", cpuid);
}
Cache object: d927fcebc7c649f4be98788694d12fc7
|