/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/kern/kern_clock.c 207138 2010-04-24 00:53:41Z attilio $");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

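/*
 * Userland reads these aggregate counters with sysctl(3).  The sketch
 * below is illustrative only and is not part of this file; it assumes
 * the standard CPUSTATES/CP_* definitions from <sys/resource.h>.
 */
#if 0	/* example, not compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1)
		return (1);
	/* Values are cumulative stathz ticks per CPU state since boot. */
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return (0);
}
#endif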
static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

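/*
 * kern.cp_times exports CPUSTATES counters for every CPU slot up to
 * mp_maxid, so a caller should probe the required size first.  A hedged
 * sketch (illustrative; read_cp_times() is a hypothetical helper):
 */
#if 0	/* example, not compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static long *
read_cp_times(size_t *nlongs)
{
	long *buf;
	size_t len;

	/* Passing a NULL buffer returns the required length in len. */
	if (sysctlbyname("kern.cp_times", NULL, &len, NULL, 0) == -1)
		return (NULL);
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	if (sysctlbyname("kern.cp_times", buf, &len, NULL, 0) == -1) {
		free(buf);
		return (NULL);
	}
	*nlongs = len / sizeof(long);	/* CPUSTATES longs per CPU slot */
	return (buf);
}
#endif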
#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, i, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx_lock, to prevent a possible
		 * priority inversion that could lead to starvation.
		 * If the lock cannot be acquired after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc_lock deadlkres", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (TD_ON_LOCK(td)) {

					/*
					 * The thread should be blocked on a
					 * turnstile; simply check whether the
					 * turnstile channel is in good state.
					 */
					MPASS(td->td_blocked != NULL);

					/* Handle wrap-around of ticks. */
					if (ticks < td->td_blktick)
						continue;
					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks) {

						/*
						 * According to the configured
						 * thresholds, this thread has
						 * been stuck on a turnstile
						 * for too long.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else if (TD_IS_SLEEPING(td)) {

					/* Handle wrap-around of ticks. */
					if (ticks < td->td_slptick)
						continue;

					/*
					 * Check whether the thread is
					 * sleeping on a lock; otherwise
					 * skip the check.
					 * Drop the thread lock in order to
					 * avoid a LOR with the sleepqueue
					 * spinlock.
					 */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {

						/*
						 * According to the configured
						 * thresholds, this thread has
						 * been stuck on a sleepqueue
						 * for too long.  However,
						 * being on a sleepqueue, it
						 * must still be checked
						 * against the blessed list.
						 */
						tryl = 0;
						for (i = 0; blessed[i] != NULL;
						    i++) {
							if (!strcmp(blessed[i],
							    td->td_wmesg)) {
								tryl = 1;
								break;
							}
						}
						if (tryl != 0) {
							tryl = 0;
							continue;
						}
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("deadlkres", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0, "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds within which sleeping on a sleepqueue is considered valid");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds within which blocking on a turnstile is considered valid");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif	/* DEADLKRES */
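/*
 * The resolver's thresholds are plain read-write sysctls, so they can be
 * tuned at runtime with sysctl(8); the values below are arbitrary
 * examples, not recommendations:
 *
 *	sysctl debug.deadlkres.blktime_threshold=300
 *	sysctl debug.deadlkres.slptime_threshold=600
 *	sysctl debug.deadlkres.sleepfreq=1
 *
 * Lower thresholds detect a wedged lock sooner, at the cost of false
 * positive panics on workloads that legitimately keep threads on sx or
 * lockmgr sleepqueues for a long time.
 */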

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */
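/*
 * A worked example of the ratio described above, with illustrative
 * numbers: if stathz = 128 and profhz = 8192, then psratio = 8192 / 128
 * = 64, so while profiling only every 64th tick of the fast clock is
 * charged to the statistics.  If stathz is 0, both duties fall back to
 * the hz clock.
 */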

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
int	psratio;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick();
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
	callout_tick();
}
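/*
 * Note on the itimerdecr() calls above: each hardclock subtracts `tick'
 * microseconds (1000000 / hz) from the armed interval timers.  As an
 * illustration, with hz = 1000 a 50 ms ITIMER_VIRTUAL expires after 50
 * hardclock calls taken in user mode, and TDF_ALRMPEND | TDF_ASTPENDING
 * makes the next return to userland deliver SIGVTALRM.
 */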

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int((volatile int *)&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
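/*
 * A worked example of the conversion above (hz = 100, so tick = 10000):
 * for a timeval of 0 s / 25000 us the first branch yields
 * (25000 + 9999) / 10000 + 1 = 3 + 1 = 4 ticks: round the division up,
 * then add one tick for the partially elapsed current tick.
 */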

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&time_lock);
	}
}
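/*
 * The usual way into startprofclock() is profil(2).  A hedged userland
 * sketch (illustrative only; consult profil(2) for the exact scale
 * semantics):
 */
#if 0	/* example, not compiled into the kernel */
#include <sys/types.h>
#include <unistd.h>

static unsigned short prof_buf[65536];

static void
start_profiling(void)
{
	/*
	 * Sample the pc into prof_buf starting at offset 0; a scale of
	 * 0x10000 requests the densest mapping described in profil(2).
	 */
	profil((char *)prof_buf, sizeof(prof_buf), 0, 0x10000);
}
#endif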

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			cp_time[CP_INTR]++;
		} else {
			td->td_pticks++;
			td->td_sticks++;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	thread_lock_flags(td, MTX_QUIET);
	sched_clock(td);
	thread_unlock(td);
}

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick;
		 * if there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

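/*
 * Userland can fetch the structure above to learn hz and friends.  A
 * minimal sketch (illustrative only, not part of this file):
 */
#if 0	/* example, not compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);

	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz %d tick %d stathz %d profhz %d\n",
	    ci.hz, ci.tick, ci.stathz, ci.profhz);
	return (0);
}
#endif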
#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
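/*
 * Worked example of the decoding above: the WD_INTERVAL field encodes a
 * power-of-two timeout, with WD_TO_1SEC corresponding to 2^0 = 1 second
 * in this arithmetic.  A WD_TO_4SEC command therefore yields
 * (1 << 2) * hz = 4 * hz watchdog_ticks, and the watchdog must be patted
 * (e.g. by watchdogd(8)) at least every 4 seconds, or hardclock() calls
 * watchdog_fire().
 */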

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */