FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clock.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: releng/5.0/sys/kern/kern_clock.c 107034 2002-11-18 01:59:31Z davidxu $
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/timetc.h>

#include <machine/cpu.h>
#include <machine/limits.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll(void);
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];

SYSCTL_OPAQUE(_kern, OID_AUTO, cp_time, CTLFLAG_RD, &cp_time, sizeof(cp_time),
    "LU", "CPU time statistics");

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */
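
/*
 * A worked example of the divider arithmetic above (the numbers are
 * merely typical, not mandated by the code): with stathz = 128 and
 * profhz = 1024, psratio = 1024 / 128 = 8.  While any process is being
 * profiled, the statistics clock is sped up to profhz, and full
 * statistics are then charged on only every 8th tick, so statistics
 * still accumulate at the original stathz rate; profiling samples are
 * taken on every tick.
 */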

int stathz;
int profhz;
static int profprocs;
int ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int psratio;				/* ratio: prof / stat */

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

#ifdef DEVICE_POLLING
	init_device_poll();
#endif
	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs
 * with each CPU passing in its curthread as the first argument.  If
 * possible, a nice future optimization would be to allow the CPU receiving
 * the actual real-time timer interrupt to call this function on behalf of
 * the other CPUs, rather than sending an IPI to all other CPUs so that they
 * can call this function.  Note that hardclock() calls hardclock_process()
 * for the CPU receiving the timer interrupt, so only the other CPUs in the
 * system need to call this function (or have it called on their behalf).
 */
void
hardclock_process(td, user)
	struct thread *td;
	int user;
{
	struct pstats *pstats;
	struct proc *p = td->td_proc;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_flag & P_KSES) {
		/* XXXKSE What to do? */
	} else {
		pstats = p->p_stats;
		if (user &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
			p->p_sflag |= PS_ALRMPEND;
			td->td_kse->ke_flags |= KEF_ASTPENDING;
		}
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
			p->p_sflag |= PS_PROFPEND;
			td->td_kse->ke_flags |= KEF_ASTPENDING;
		}
	}
}
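
/*
 * For reference, a minimal userland sketch of the interface serviced
 * above (illustrative only, not part of this file): each hardclock tick
 * taken in user mode decrements the process's ITIMER_VIRTUAL timer by
 * `tick' microseconds via itimerdecr(); on expiry, PS_ALRMPEND plus the
 * AST machinery deliver SIGVTALRM.
 */
#if 0	/* illustrative userland code; never compiled as part of the kernel */
#include <sys/time.h>
#include <signal.h>
#include <unistd.h>

static void
alarmed(int sig)
{
	/* Reached once itimerdecr() has counted the timer down to zero. */
	write(STDOUT_FILENO, "SIGVTALRM\n", 10);
	_exit(0);
}

int
main(void)
{
	struct itimerval itv = { { 0, 0 }, { 0, 250000 } };

	signal(SIGVTALRM, alarmed);
	setitimer(ITIMER_VIRTUAL, &itv, NULL);	/* 250ms of user CPU time */
	for (;;)
		;			/* spin in user mode to burn ticks */
}
#endif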

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	int need_softclock = 0;

	CTR0(KTR_CLK, "hardclock fired");
	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
	hardclock_process(curthread, CLKF_USERMODE(frame));
	mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);

	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0)
		statclock(frame);

#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	mtx_lock_spin_flags(&callout_lock, MTX_QUIET);
	ticks++;
	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
		need_softclock = 1;
	} else if (softticks + 1 == ticks)
		++softticks;
	mtx_unlock_spin_flags(&callout_lock, MTX_QUIET);

	/*
	 * swi_sched acquires sched_lock, so we don't want to call it with
	 * callout_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(softclock_ih, 0);
}
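
/*
 * The callwheel hash above relies on the wheel size being a power of
 * two, so that (ticks & callwheelmask) is a cheap modulus selecting the
 * bucket for the current tick.  A minimal sketch of the idea follows
 * (hypothetical names; the real wheel is set up in kern_timeout.c):
 */
#if 0	/* illustrative sketch; never compiled as part of the kernel */
#define	EXWHEELSIZE	256		/* must be a power of two */
#define	EXWHEELMASK	(EXWHEELSIZE - 1)

struct ex_callout {
	struct ex_callout *next;
	int expire_tick;		/* absolute tick of expiry */
};
static struct ex_callout *ex_wheel[EXWHEELSIZE];

/*
 * A callout due at absolute tick `t' hangs off ex_wheel[t & EXWHEELMASK].
 * Entries whose expire_tick lies a full wheel revolution (or more) in the
 * future share the bucket and are skipped until their tick arrives; the
 * softticks bookkeeping above lets softclock() catch up on skipped buckets.
 */
#endif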

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
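
/*
 * A worked example of the first branch above (assuming hz = 100, so
 * tick = 10000 usec): for tv = { 2, 500000 }, i.e. 2.5 seconds,
 * ticks = (2 * 1000000 + 500000 + 9999) / 10000 + 1 = 251; the extra
 * tick allows for the partially elapsed current tick to expire.
 */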

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	/*
	 * XXX: Right now sched_lock protects statclock(), but perhaps
	 * it should be protected later on by a time_lock, which would
	 * cover psdiv, etc. as well.
	 */
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_PROFIL) == 0) {
		p->p_sflag |= PS_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	mtx_lock_spin(&sched_lock);
	if (p->p_sflag & PS_PROFIL) {
		p->p_sflag &= ~PS_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Do process and kernel statistics.  Most of the statistics are only
 * used by user-level statistics programs.  The main exceptions are
 * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.  This
 * function should be called by all CPUs in the system for each
 * statistics clock interrupt.  See the description of hardclock_process
 * for more detail on this function's relationship to statclock.
 */
void
statclock_process(ke, pc, user)
	struct kse *ke;
	register_t pc;
	int user;
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;
	struct proc *p = ke->ke_proc;
	struct thread *td = ke->ke_thread;	/* current thread */

	KASSERT(ke == curthread->td_kse, ("statclock_process: ke != curthread->td_kse"));
	mtx_assert(&sched_lock, MA_OWNED);
	if (user) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		if (p->p_sflag & PS_PROFIL)
			addupc_intr(ke, pc, 1);
		if (pscnt < psdiv)
			return;
		/*
		 * Charge the time as appropriate.
		 */
		if (p->p_flag & P_KSES)
			thread_add_ticks_intr(1, 1);
		ke->ke_uticks++;
		if (ke->ke_ksegrp->kg_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = pc - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt < psdiv)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) {
			ke->ke_iticks++;
			cp_time[CP_INTR]++;
		} else {
			if (p->p_flag & P_KSES)
				thread_add_ticks_intr(0, 1);
			ke->ke_sticks++;
			if (p != PCPU_GET(idlethread)->td_proc)
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}

	sched_clock(ke->ke_thread);

	/* Update resource usage integrals and maximums. */
	if ((pstats = p->p_stats) != NULL &&
	    (ru = &pstats->p_ru) != NULL &&
	    (vm = p->p_vmspace) != NULL) {
		ru->ru_ixrss += pgtok(vm->vm_tsize);
		ru->ru_idrss += pgtok(vm->vm_dsize);
		ru->ru_isrss += pgtok(vm->vm_ssize);
		rss = pgtok(vmspace_resident_count(vm));
		if (ru->ru_maxrss < rss)
			ru->ru_maxrss = rss;
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.  Most of the statistics are only
 * used by user-level statistics programs.  The main exceptions are
 * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
 */
void
statclock(frame)
	register struct clockframe *frame;
{

	CTR0(KTR_CLK, "statclock fired");
	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
	if (--pscnt == 0)
		pscnt = psdiv;
	statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
	mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
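
/*
 * A worked pass through the divider (assuming psratio = 8 while
 * profiling): pscnt counts down 7, 6, ..., 1 and is then reset to
 * psdiv, so statclock_process() sees pscnt == psdiv on only one call
 * in eight and bails out early on the other seven; a profiling sample
 * is still taken on every call.
 */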

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");