FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clock.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/kern/kern_clock.c 192304 2009-05-18 12:03:43Z ed $");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");
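
/*
 * Example (editor's sketch, not part of the original file): reading the
 * aggregate kern.cp_time array from userland.  The sysctlbyname(3) call
 * and the CP_* and CPUSTATES constants from <sys/resource.h> are standard
 * on FreeBSD; everything else here is illustrative.  A real monitor would
 * sample the array twice and report the per-state deltas.
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return (0);
}
#endif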

static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
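
/*
 * Example (editor's sketch, not part of the original file): reading the
 * per-CPU kern.cp_times array.  The buffer is sized by first asking for
 * the required length (the !req->oldptr path above), which covers
 * mp_maxid + 1 slots even when some CPU IDs are absent.
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	long *cp_times;
	size_t c, len, ncpu;

	if (sysctlbyname("kern.cp_times", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((cp_times = malloc(len)) == NULL ||
	    sysctlbyname("kern.cp_times", cp_times, &len, NULL, 0) == -1)
		return (1);
	ncpu = len / (sizeof(long) * CPUSTATES);
	for (c = 0; c < ncpu; c++)
		printf("cpu%zu: user %ld sys %ld idle %ld\n", c,
		    cp_times[c * CPUSTATES + CP_USER],
		    cp_times[c * CPUSTATES + CP_SYS],
		    cp_times[c * CPUSTATES + CP_IDLE]);
	free(cp_times);
	return (0);
}
#endif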

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
int	psratio;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}
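
/*
 * Editor's note: a worked example of the ratio computed above.  With a
 * hypothetical stathz of 128 and profhz of 1024, psratio = 1024 / 128 = 8,
 * so while profiling, statistics are effectively accumulated on every 8th
 * profile tick.  If there is no separate statistics clock (stathz == 0),
 * hz stands in for stathz, matching the fallback in hardclock() below.
 */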

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick();
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
	callout_tick();
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int((volatile int *)&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}
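
/*
 * Example (editor's sketch, not part of the original file): the
 * callout_tick() call in hardclock_cpu() above is what ultimately drives
 * callout(9) timers.  A kernel consumer schedules work against the same
 * hz tick; the names below are hypothetical.
 */
#if 0
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

static struct callout example_callout;

static void
example_expire(void *arg)
{
	/* Runs from softclock context; reschedule one second out. */
	callout_reset(&example_callout, hz, example_expire, arg);
}

static void
example_start(void)
{
	callout_init(&example_callout, CALLOUT_MPSAFE);
	callout_reset(&example_callout, hz, example_expire, NULL);
}
#endif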

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
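
/*
 * Editor's note: a worked example of the first branch above.  Assume
 * hz = 1000, so tick = 1000 us, and tv = { 1, 500000 }.  Then
 *
 *	ticks = (1 * 1000000 + 500000 + 999) / 1000 + 1 = 1501
 *
 * i.e. 1500 ticks covering the 1.5 second interval, plus one tick so
 * the partially elapsed current tick cannot cut the timeout short.
 * The (tick - 1) round-up term only matters when usec is not an exact
 * multiple of tick.
 */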

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&time_lock);
	}
}
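
/*
 * Example (editor's sketch, not part of the original file): these
 * routines are reached through the profil(2) system call.  A userland
 * program enables PC sampling into a buffer and disables it with a
 * scale of 0; the scale value below is a placeholder, see profil(2)
 * for the fixed-point scale encoding.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

static unsigned short samples[8192];

static void
example_profile(void)
{
	profil((char *)samples, sizeof(samples), 0, 0x8000);
	/* ... run the code being profiled ... */
	profil(NULL, 0, 0, 0);		/* scale 0 stops profiling */
}
#endif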

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			cp_time[CP_INTR]++;
		} else {
			td->td_pticks++;
			td->td_sticks++;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	thread_lock_flags(td, MTX_QUIET);
	sched_clock(td);
	thread_unlock(td);
}
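
/*
 * Example (editor's sketch, not part of the original file): the rusage
 * integrals and maximums updated above surface in userland through
 * getrusage(2).
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

static void
example_rusage(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("maxrss %ld kB, user %ld.%06lds, sys %ld.%06lds\n",
		    ru.ru_maxrss,
		    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
		    (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
}
#endif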

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
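
/*
 * Editor's note: a worked example of the interval decoding above.  The
 * low bits of cmd hold a power-of-two timeout relative to WD_TO_1SEC,
 * so for a hypothetical cmd of (WD_ACTIVE | WD_TO_8SEC) we get
 * u - WD_TO_1SEC == 3 and watchdog_ticks = (1 << 3) * hz, i.e. eight
 * seconds' worth of hardclock ticks before watchdog_fire() runs.
 */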

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */