FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clock.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/kern/kern_clock.c 215044 2010-11-09 18:46:09Z jhb $");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time32)));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time)));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

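/*
 * Example (userland, not part of this file): a minimal sketch of reading
 * the aggregate counters exported above via sysctlbyname(3).  The state
 * indices (CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE) come from
 * <sys/resource.h>:
 *
 *	long cp_time[CPUSTATES];
 *	size_t len = sizeof(cp_time);
 *
 *	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == 0)
 *		printf("user %ld sys %ld idle %ld\n",
 *		    cp_time[CP_USER], cp_time[CP_SYS], cp_time[CP_IDLE]);
 */
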
static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return (SYSCTL_OUT(req, 0,
			    sizeof(cp_time32) * (mp_maxid + 1)));
		else
#endif
			return (SYSCTL_OUT(req, 0,
			    sizeof(long) * CPUSTATES * (mp_maxid + 1)));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time,
			    sizeof(long) * CPUSTATES);
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

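/*
 * Example (userland, not part of this file): a sketch of sizing the buffer
 * for the per-CPU counters above.  As the handler shows, the reply carries
 * CPUSTATES longs for every CPU slot up to mp_maxid, absent CPUs included,
 * so the usual pattern is to ask the kernel for the length first:
 *
 *	size_t len;
 *	long *buf;
 *
 *	if (sysctlbyname("kern.cp_times", NULL, &len, NULL, 0) == 0 &&
 *	    (buf = malloc(len)) != NULL)
 *		sysctlbyname("kern.cp_times", buf, &len, NULL, 0);
 */
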
#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, i, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx_lock in order to avoid a
		 * possible priority inversion leading to starvation.
		 * If the lock can't be acquired after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td) {

				/*
				 * Once a thread is found in an
				 * "interesting" state, check for a
				 * possible wraparound of the ticks
				 * counter.
				 */
				thread_lock(td);
				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {

					/*
					 * The thread should be blocked on
					 * a turnstile; simply check
					 * whether the turnstile channel
					 * is in good state.
					 */
					MPASS(td->td_blocked != NULL);

					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks) {

						/*
						 * According to the
						 * configured thresholds,
						 * this thread has been
						 * stuck on a turnstile
						 * for too long.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else if (TD_IS_SLEEPING(td) &&
				    TD_ON_SLEEPQ(td) &&
				    ticks < td->td_blktick) {

					/*
					 * Check whether the thread is
					 * sleeping on a lock; otherwise
					 * skip the check.  Drop the
					 * thread lock in order to avoid
					 * a LOR with the sleepqueue
					 * spinlock.
					 */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {

						/*
						 * According to the
						 * configured thresholds,
						 * this thread has been
						 * stuck on a sleepqueue
						 * for too long.  However,
						 * being on a sleepqueue,
						 * we still have to check
						 * the blessed list before
						 * panicking.
						 */
						tryl = 0;
						for (i = 0; blessed[i] != NULL;
						    i++) {
							if (!strcmp(blessed[i],
							    td->td_wmesg)) {
								tryl = 1;
								break;
							}
						}
						if (tryl != 0) {
							tryl = 0;
							continue;
						}
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("-", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0, "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds within which sleeping on a sleepqueue is considered valid");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds within which blocking on a turnstile is considered valid");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif /* DEADLKRES */
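
/*
 * Example (shell, not part of this file): with DEADLKRES compiled in, the
 * thresholds above can be tuned at runtime, e.g. to flag threads blocked
 * on a turnstile for more than five minutes and check every ten seconds:
 *
 *	sysctl debug.deadlkres.blktime_threshold=300
 *	sysctl debug.deadlkres.sleepfreq=10
 */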

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
int	psratio;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

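/*
 * Worked example (illustrative values, not from this file): with a
 * statistics clock of stathz = 128 and no profiling clock configured by
 * the machine-specific code, profhz defaults to 128 and psratio =
 * profhz / stathz = 1.  If the hardware instead runs the profiling clock
 * at profhz = 1024, then psratio = 8, and the statistics code divides by
 * that ratio as described in the comment block above.
 */
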
/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick();
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
	callout_tick();
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int((volatile int *)&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

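/*
 * Worked example (illustrative, assuming hz = 100 and thus tick = 10000
 * microseconds): for tv = { .tv_sec = 1, .tv_usec = 500000 }, the first
 * branch applies and
 *
 *	ticks = (1 * 1000000 + 500000 + 9999) / 10000 + 1
 *	      = 1509999 / 10000 + 1 = 150 + 1 = 151
 *
 * i.e. 1.5 seconds rounds up to 151 ticks, the extra tick allowing for
 * the current tick to expire.
 */
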
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

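/*
 * Example (userland, not part of this file): a sketch of how these two
 * routines are typically reached.  A process turns profiling on with
 * profil(2), which ends up in startprofclock(); a zero scale turns it
 * off again via stopprofclock():
 *
 *	unsigned short samples[8192];
 *
 *	profil((char *)samples, sizeof(samples), 0, 0x10000);
 *	... run the code being profiled ...
 *	profil(NULL, 0, 0, 0);
 */
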
/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			cp_time[CP_INTR]++;
		} else {
			td->td_pticks++;
			td->td_sticks++;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz) ? stathz : hz);
	thread_lock_flags(td, MTX_QUIET);
	sched_clock(td);
	thread_unlock(td);
}

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

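/*
 * Example (userland, not part of this file): a minimal sketch of reading
 * the structure exported above; struct clockinfo is declared in
 * <sys/time.h>:
 *
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *
 *	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == 0)
 *		printf("hz %d stathz %d profhz %d tick %d\n",
 *		    ci.hz, ci.stathz, ci.profhz, ci.tick);
 */
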
#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}

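/*
 * Worked example (illustrative): the WD_INTERVAL field encodes the timeout
 * as a power-of-two number of nanoseconds, WD_TO_1SEC denoting 2^30 ns
 * (about one second).  Assuming a requested interval of WD_TO_1SEC + 3
 * (roughly eight seconds), the computation above arms the timer with
 *
 *	watchdog_ticks = (1 << 3) * hz = 8 * hz
 *
 * hardclock ticks before watchdog_fire() runs.
 */
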
/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */