FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_clock.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE_EX( , , clock, prof, \
    cpu_startprofclock, cpu_stopprofclock);
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, "struct thread *", "struct proc *");

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");
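
/*
 * Illustrative userland sketch (not part of this file): the handler above
 * backs kern.cp_time, which can be read with sysctlbyname(3).  The array
 * layout (CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES) comes
 * from <sys/resource.h>.  Kept under "#if 0" so it is never built here.
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	/* Fetch the aggregated per-state tick counters. */
	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1)
		return (1);
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return (0);
}
#endif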

static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0,
			    sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0,
			    sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time,
			    sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
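
/*
 * Illustrative userland sketch (not part of this file): kern.cp_times is
 * variable-length (mp_maxid + 1 CPU slots), so the usual two-call pattern
 * applies: probe with a NULL old-pointer to learn the size, then fetch.
 * That probe is exactly the "!req->oldptr" path in the handler above.
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	long *times;
	size_t len = 0, ncpu;

	/* First call: only the required buffer size is returned. */
	if (sysctlbyname("kern.cp_times", NULL, &len, NULL, 0) == -1)
		return (1);
	times = malloc(len);
	if (times == NULL ||
	    sysctlbyname("kern.cp_times", times, &len, NULL, 0) == -1)
		return (1);
	ncpu = len / (sizeof(long) * CPUSTATES);
	for (size_t c = 0; c < ncpu; c++)
		printf("cpu%zu idle ticks: %ld\n", c,
		    times[c * CPUSTATES + CP_IDLE]);
	free(times);
	return (0);
}
#endif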

#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

static void
deadlres_td_on_lock(struct proc *p, struct thread *td, int blkticks)
{
	int tticks;

	sx_assert(&allproc_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * The thread should be blocked on a turnstile; simply check
	 * whether the turnstile channel is in good state.
	 */
	MPASS(td->td_blocked != NULL);

	tticks = ticks - td->td_blktick;
	if (tticks > blkticks)
		/*
		 * According to the provided thresholds, this thread has
		 * been stuck for too long on a turnstile.
		 */
		panic("%s: possible deadlock detected for %p (%s), "
		    "blocked for %d ticks\n", __func__,
		    td, sched_tdname(td), tticks);
}

static void
deadlres_td_sleep_q(struct proc *p, struct thread *td, int slpticks)
{
	const void *wchan;
	int i, slptype, tticks;

	sx_assert(&allproc_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Check if the thread is sleeping on a lock, otherwise skip the check.
	 * Drop the thread lock in order to avoid a LOR with the sleepqueue
	 * spinlock.
	 */
	wchan = td->td_wchan;
	tticks = ticks - td->td_slptick;
	slptype = sleepq_type(wchan);
	if ((slptype == SLEEPQ_SX || slptype == SLEEPQ_LK) &&
	    tticks > slpticks) {
		/*
		 * According to the provided thresholds, this thread has
		 * been stuck for too long on a sleepqueue.
		 * However, being on a sleepqueue, we might still check
		 * the blessed list.
		 */
		for (i = 0; blessed[i] != NULL; i++)
			if (!strcmp(blessed[i], td->td_wmesg))
				return;

		panic("%s: possible deadlock detected for %p (%s), "
		    "blocked for %d ticks\n", __func__,
		    td, sched_tdname(td), tticks);
	}
}

static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	int blkticks, slpticks, tryl;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx_lock in order to avoid a
		 * possible priority inversion problem leading to
		 * starvation.
		 * If the lock can't be acquired after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected "
				    "on allproc_lock\n", __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (TD_ON_LOCK(td))
					deadlres_td_on_lock(p, td,
					    blkticks);
				else if (TD_IS_SLEEPING(td))
					deadlres_td_sleep_q(p, td,
					    slpticks);
				thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("-", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds a thread may sleep on a sleepqueue before it is "
    "considered deadlocked");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds a thread may block on a turnstile before it is "
    "considered deadlocked");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif /* DEADLKRES */
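
/*
 * Illustrative userland sketch (not part of this file): the deadlkres
 * knobs above are CTLFLAG_RW, so they can be retuned at runtime, e.g.
 * via sysctlbyname(3) with a new value.  The kernel must be built with
 * "options DEADLKRES" for the OIDs to exist at all.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

int
lower_blktime_threshold(void)
{
	int secs = 300;		/* panic after 5 minutes on a turnstile */

	return (sysctlbyname("debug.deadlkres.blktime_threshold",
	    NULL, NULL, &secs, sizeof(secs)));
}
#endif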

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);

static void
watchdog_attach(void)
{
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
}

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
int	profprocs;
volatile int	ticks;
int	psratio;

DPCPU_DEFINE_STATIC(int, pcputicks);	/* Per-CPU version of ticks. */
#ifdef DEVICE_POLLING
static int devpoll_run = 0;
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef SW_WATCHDOG
	/* Enable hardclock watchdog now, even if a hardware watchdog exists. */
	watchdog_attach();
#else
	/* Volunteer to run a software watchdog. */
	if (wdog_software_attach == NULL)
		wdog_software_attach = watchdog_attach;
#endif
}
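
/*
 * Worked example (illustrative numbers, not taken from this file): with
 * the historical defaults hz = 100, stathz = 128 and profhz = 1024, the
 * code above yields psratio = 1024 / 128 = 8, so while profiling the
 * statistics clock runs 8x fast and statistics consumers divide by
 * psratio.  If the platform provides no second timer, stathz is 0, i
 * falls back to hz, and a zero profhz is fixed up to that same rate,
 * giving psratio = 1.
 */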

static __noinline void
hardclock_itimer(struct thread *td, struct pstats *pstats, int cnt,
    int usermode)
{
	struct proc *p;
	int flags;

	flags = 0;
	p = td->td_proc;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_ITIMLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
		    tick * cnt) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_ITIMUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_ITIMLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
		    tick * cnt) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_ITIMUNLOCK(p);
	}
	if (flags != 0) {
		thread_lock(td);
		td->td_flags |= flags;
		thread_unlock(td);
	}
}

void
hardclock(int cnt, int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int *t = DPCPU_PTR(pcputicks);
	int global, i, newticks;

	/*
	 * Update per-CPU and possibly global ticks values.
	 */
	*t += cnt;
	global = ticks;
	do {
		newticks = *t - global;
		if (newticks <= 0) {
			if (newticks < -1)
				*t = global - 1;
			newticks = 0;
			break;
		}
	} while (!atomic_fcmpset_int(&ticks, &global, *t));

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	if (__predict_false(
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) ||
	    timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)))
		hardclock_itimer(td, pstats, cnt, usermode);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	/* We are in charge of handling this tick's duties. */
	if (newticks > 0) {
		tc_ticktock(newticks);
#ifdef DEVICE_POLLING
		/* It is dangerous and unnecessary to call this concurrently. */
		if (atomic_cmpset_acq_int(&devpoll_run, 0, 1)) {
			/* This is very short and quick. */
			hardclock_device_poll();
			atomic_store_rel_int(&devpoll_run, 0);
		}
#endif /* DEVICE_POLLING */
		if (watchdog_enabled > 0) {
			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
			if (i > 0 && i <= newticks)
				watchdog_fire();
		}
		intr_event_handle(clk_intr_event, NULL);
	}
	if (curcpu == CPU_FIRST())
		cpu_tick_calibration();
	if (__predict_false(DPCPU_GET(epoch_cb_count)))
		GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
}

void
hardclock_sync(int cpu)
{
	int *t;

	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
	t = DPCPU_ID_PTR(cpu, pcputicks);

	*t = ticks;
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = howmany(sec * 1000000 + (unsigned long)usec, tick) + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + howmany((unsigned long)usec, tick) + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
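
/*
 * Worked example (illustrative numbers): with hz = 1000, the global
 * "tick" is 1000 microseconds.  For tv = { .tv_sec = 2, .tv_usec = 345678 }
 * the first branch applies, since 2 <= LONG_MAX / 1000000:
 *
 *	ticks = howmany(2 * 1000000 + 345678, 1000) + 1
 *	      = howmany(2345678, 1000) + 1 = 2346 + 1 = 2347
 *
 * The extra tick allows for the current tick being about to expire when
 * the timeout is armed.
 */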

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			while (p->p_profthreads != 0) {
				p->p_flag |= P_STOPPROF;
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			}
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock(&time_lock);
	}
}
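
/*
 * Illustrative userland sketch (not part of this file): startprofclock()
 * is reached when a process enables PC-sampling profiling, e.g. through
 * profil(2).  A minimal caller might look like this; the buffer size and
 * scale chosen here are arbitrary.
 */
#if 0
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>

void
start_sampling(void)
{
	size_t size = 65536;
	char *samples = calloc(1, size);

	/*
	 * Sample PCs from address 0 up; the scale maps the PC range onto
	 * the buffer.  Calling profil() again with scale 0 turns sampling
	 * back off, which eventually reaches stopprofclock().
	 */
	if (samples != NULL)
		profil(samples, size, 0, 0x10000);
}
#endif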

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int cnt, int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;
	uint64_t runtime, new_switchtime;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks += cnt;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE] += cnt;
		else
			cp_time[CP_USER] += cnt;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks += cnt;
			cp_time[CP_INTR] += cnt;
		} else {
			td->td_pticks += cnt;
			td->td_sticks += cnt;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS] += cnt;
			else
				cp_time[CP_IDLE] += cnt;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz) ? stathz : hz);
	SDT_PROBE2(sched, , , tick, td, td->td_proc);
	thread_lock_flags(td, MTX_QUIET);

	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);

	sched_clock(td, cnt);
	thread_unlock(td);
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
#endif
}

void
profclock(int cnt, int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick;
		 * if there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, cnt);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i) += cnt;
			}
		}
	}
#endif
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame);
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
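
/*
 * Worked example (illustrative): WD_INTERVAL encodes the timeout as a
 * power-of-two nanosecond exponent, so for cmd = WD_ACTIVE | WD_TO_16SEC
 * we get u - WD_TO_1SEC = 4 and watchdog_ticks = (1 << 4) * hz = 16 * hz.
 * A minimal userland "patting" sketch, assuming the watchdog(4) device
 * interface (not part of this file), follows.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/watchdog.h>
#include <fcntl.h>

int
pat_watchdog(void)
{
	u_int u = WD_ACTIVE | WD_TO_16SEC;
	int fd = open("/dev/" _PATH_WATCHDOG, O_RDWR);

	if (fd == -1)
		return (-1);
	/* Each successful pat re-arms the 16 second timeout. */
	return (ioctl(fd, WDIOCPATPAT, &u));
}
#endif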

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	uint64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = sintrcnt / sizeof(u_long);

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}