/*
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.0/sys/kern/kern_intr.c 105357 2002-10-17 21:02:02Z robert $
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>

struct int_entropy {
	struct proc *proc;
	int vector;
};

void *vm_ih;
void *softclock_ih;
struct ithd *clk_ithd;
struct ithd *tty_ithd;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static void ithread_update(struct ithd *);
static void ithread_loop(void *);
static void start_softintr(void *);

u_char
ithread_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("ithread_priority: no interrupt type in flags");
	}

	return (pri);
}
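
/*
 * For example, ithread_priority(INTR_TYPE_NET) returns PI_NET.  Exactly
 * one INTR_TYPE_* bit may be set: a combination such as
 * (INTR_TYPE_TTY | INTR_TYPE_NET) matches no case above and hits the
 * default panic.
 */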

/*
 * Regenerate the name (p_comm) and priority of an interrupt thread.
 */
static void
ithread_update(struct ithd *ithd)
{
	struct intrhand *ih;
	struct thread *td;
	struct proc *p;
	int entropy;

	mtx_assert(&ithd->it_lock, MA_OWNED);
	td = ithd->it_td;
	if (td == NULL)
		return;
	p = td->td_proc;

	strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));

	ih = TAILQ_FIRST(&ithd->it_handlers);
	if (ih == NULL) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = PRI_MAX_ITHD;
		td->td_base_pri = PRI_MAX_ITHD;
		mtx_unlock_spin(&sched_lock);
		ithd->it_flags &= ~IT_ENTROPY;
		return;
	}
	entropy = 0;
	mtx_lock_spin(&sched_lock);
	td->td_priority = ih->ih_pri;
	td->td_base_pri = ih->ih_pri;
	mtx_unlock_spin(&sched_lock);
	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
		    sizeof(p->p_comm)) {
			strcat(p->p_comm, " ");
			strcat(p->p_comm, ih->ih_name);
		} else if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
			if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
				p->p_comm[sizeof(p->p_comm) - 2] = '*';
			else
				p->p_comm[sizeof(p->p_comm) - 2] = '+';
		} else
			strcat(p->p_comm, "+");
		if (ih->ih_flags & IH_ENTROPY)
			entropy++;
	}
	if (entropy)
		ithd->it_flags |= IT_ENTROPY;
	else
		ithd->it_flags &= ~IT_ENTROPY;
	CTR2(KTR_INTR, "%s: updated %s\n", __func__, p->p_comm);
}
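
/*
 * For example, an ithread named "irq14:" with handlers named "ata0" and
 * "ata1" (hypothetical names) ends up with p_comm "irq14: ata0 ata1".
 * When a handler name no longer fits, a trailing '+' stands in for the
 * omitted names, and it becomes '*' once further handlers are dropped
 * as well.
 */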

int
ithread_create(struct ithd **ithread, int vector, int flags,
    void (*disable)(int), void (*enable)(int), const char *fmt, ...)
{
	struct ithd *ithd;
	struct thread *td;
	struct proc *p;
	int error;
	va_list ap;

	/* The only valid flag during creation is IT_SOFT. */
	if ((flags & ~IT_SOFT) != 0)
		return (EINVAL);

	ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
	ithd->it_vector = vector;
	ithd->it_disable = disable;
	ithd->it_enable = enable;
	ithd->it_flags = flags;
	TAILQ_INIT(&ithd->it_handlers);
	mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
	va_end(ap);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", ithd->it_name);
	if (error) {
		mtx_destroy(&ithd->it_lock);
		free(ithd, M_ITHREAD);
		return (error);
	}
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
	td->td_priority = PRI_MAX_ITHD;
	TD_SET_IWAIT(td);
	ithd->it_td = td;
	td->td_ithd = ithd;
	if (ithread != NULL)
		*ithread = ithd;

	CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
	return (0);
}
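
/*
 * Illustrative sketch (hypothetical names): machine-dependent interrupt
 * setup code creates one ithread per hardware vector, for example:
 *
 *	error = ithread_create(&ithd, irq, 0, my_irq_disable,
 *	    my_irq_enable, "irq%d:", irq);
 *
 * The enable callback is what ithread_loop() below uses to unmask the
 * interrupt source again once every handler has run.
 */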

int
ithread_destroy(struct ithd *ithread)
{
	struct thread *td;
	struct proc *p;

	if (ithread == NULL)
		return (EINVAL);

	td = ithread->it_td;
	p = td->td_proc;
	mtx_lock(&ithread->it_lock);
	if (!TAILQ_EMPTY(&ithread->it_handlers)) {
		mtx_unlock(&ithread->it_lock);
		return (EINVAL);
	}
	ithread->it_flags |= IT_DEAD;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&ithread->it_lock);
	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
	return (0);
}

int
ithread_add_handler(struct ithd *ithread, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intrhand *ih, *temp_ih;

	if (ithread == NULL || name == NULL || handler == NULL)
		return (EINVAL);
	if ((flags & INTR_FAST) != 0)
		flags |= INTR_EXCL;

	ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_ithread = ithread;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST | IH_EXCLUSIVE;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	mtx_lock(&ithread->it_lock);
	if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
		goto fail;
	if (!TAILQ_EMPTY(&ithread->it_handlers) &&
	    (TAILQ_FIRST(&ithread->it_handlers)->ih_flags & IH_EXCLUSIVE) != 0)
		goto fail;

	TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ithread->it_name);
	return (0);

fail:
	mtx_unlock(&ithread->it_lock);
	free(ih, M_ITHREAD);
	return (EINVAL);
}
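
/*
 * Illustrative sketch: drivers normally reach this function indirectly
 * through bus_setup_intr(9) rather than calling it themselves.  The
 * names "sc", "my_intr" and "irq_res" below are hypothetical:
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, my_intr, sc, &sc->intrhand);
 *
 * The bus code converts INTR_TYPE_NET into a priority with
 * ithread_priority() and adds the handler to the vector's ithread.
 */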

int
ithread_remove_handler(void *cookie)
{
	struct intrhand *handler = (struct intrhand *)cookie;
	struct ithd *ithread;
#ifdef INVARIANTS
	struct intrhand *ih;
#endif

	if (handler == NULL)
		return (EINVAL);
	ithread = handler->ih_ithread;
	KASSERT(ithread != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt thread",
	    handler->ih_name));
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ithread->it_name);
	mtx_lock(&ithread->it_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ithread->it_lock);
	panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
	    handler->ih_name, ithread->it_name);
ok:
#endif
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 */
	mtx_lock_spin(&sched_lock);
	if (!TD_AWAITING_INTR(ithread->it_td)) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ithread->it_need = 1;
	} else
		TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
	mtx_unlock_spin(&sched_lock);
	if ((handler->ih_flags & IH_DEAD) != 0)
		msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);
	free(handler, M_ITHREAD);
	return (0);
}
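
/*
 * Illustrative sketch: the matching teardown for the bus_setup_intr(9)
 * sketch above goes through bus_teardown_intr(9), which hands the
 * cookie back to this function:
 *
 *	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
 */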

int
ithread_schedule(struct ithd *ithread, int do_switch)
{
	struct int_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ithread == NULL || TAILQ_EMPTY(&ithread->it_handlers))
		return (EINVAL);

	ctd = curthread;
	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && (ithread->it_flags & IT_ENTROPY)) {
		entropy.vector = ithread->it_vector;
		entropy.proc = ctd->td_proc;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	td = ithread->it_td;
	p = td->td_proc;
	KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
	CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
	    __func__, p->p_pid, p->p_comm, ithread->it_need);

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.  If so and the do_switch flag is
	 * true and it is safe to switch, then switch to the ithread
	 * immediately.  Otherwise, set the needresched flag to guarantee
	 * that this ithread will run before any userland processes.
	 */
	ithread->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
		TD_CLR_IWAIT(td);
		setrunqueue(td);
		if (do_switch && ctd->td_critnest == 1) {
			KASSERT((TD_IS_RUNNING(ctd)),
			    ("ithread_schedule: Bad state for curthread."));
			ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
			if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
				ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
			mi_switch();
		} else {
			curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
		}
	} else {
		CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
		    __func__, p->p_pid, ithread->it_need, p->p_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}
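
/*
 * Illustrative sketch (hypothetical): a machine-dependent interrupt
 * dispatcher would invoke this along the lines of
 *
 *	if (ithread_schedule(ithd, !cold) != 0)
 *		log(LOG_ERR, "stray irq %d\n", irq);
 *
 * swi_sched() below uses the same ithread_schedule(it, !cold) pattern.
 */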

int
swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct ithd *ithd;
	int error;

	if (flags & (INTR_FAST | INTR_ENTROPY))
		return (EINVAL);

	ithd = (ithdp != NULL) ? *ithdp : NULL;

	if (ithd != NULL) {
		if ((ithd->it_flags & IT_SOFT) == 0)
			return (EINVAL);
	} else {
		error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
		    "swi%d:", pri);
		if (error)
			return (error);

		if (ithdp != NULL)
			*ithdp = ithd;
	}
	return (ithread_add_handler(ithd, name, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}

/*
 * Schedule a heavyweight software interrupt process.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	struct ithd *it = ih->ih_ithread;
	int error;

	atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

	CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
	    it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);
	if (!(flags & SWI_DELAY)) {
		error = ithread_schedule(it, !cold);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
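
/*
 * Illustrative sketch (hypothetical names): a subsystem registers a soft
 * interrupt handler once and then schedules it whenever there is work:
 *
 *	static void *my_cookie;
 *
 *	static void
 *	my_swi_handler(void *arg)
 *	{
 *		... runs in the swi ithread, under Giant unless the
 *		... handler was registered with INTR_MPSAFE
 *	}
 *
 * At initialization:
 *
 *	swi_add(NULL, "mydev", my_swi_handler, NULL, SWI_TQ, 0, &my_cookie);
 *
 * Later, e.g. from a hardware interrupt handler:
 *
 *	swi_sched(my_cookie, 0);
 */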

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct ithd *ithd;		/* our thread context */
	struct intrhand *ih;		/* and our interrupt handler chain */
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct ithd *)arg;	/* point to myself */
	KASSERT(ithd->it_td == td && td->td_ithd == ithd,
	    ("%s: ithread and proc linkage out of sync", __func__));

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			td->td_ithd = NULL;
			mtx_destroy(&ithd->it_lock);
			mtx_lock(&Giant);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
		    p->p_pid, p->p_comm, ithd->it_need);
		while (ithd->it_need) {
			/*
			 * Service interrupts.  If another interrupt
			 * arrives while we are running, they will set
			 * it_need to denote that we should make
			 * another pass.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
restart:
			TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
				if (ithd->it_flags & IT_SOFT && !ih->ih_need)
					continue;
				atomic_store_rel_int(&ih->ih_need, 0);
				CTR6(KTR_INTR,
				    "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
				    p->p_pid, (void *)ih,
				    (void *)ih->ih_handler, ih->ih_argument,
				    ih->ih_flags);

				if ((ih->ih_flags & IH_DEAD) != 0) {
					mtx_lock(&ithd->it_lock);
					TAILQ_REMOVE(&ithd->it_handlers, ih,
					    ih_next);
					wakeup(ih);
					mtx_unlock(&ithd->it_lock);
					goto restart;
				}
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_lock(&Giant);
				ih->ih_handler(ih->ih_argument);
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_unlock(&Giant);
			}
		}

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		mtx_assert(&Giant, MA_NOTOWNED);
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need) {
			/*
			 * Should we call this earlier in the loop above?
			 */
			if (ithd->it_enable != NULL)
				ithd->it_enable(ithd->it_vector);
			TD_SET_IWAIT(td);	/* we're idle */
			p->p_stats->p_ru.ru_nvcsw++;
			CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
			mi_switch();
			CTR2(KTR_INTR, "%s: pid %d: resumed", __func__,
			    p->p_pid);
		}
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih))
		panic("died while creating standard software ithreads");

	PROC_LOCK(clk_ithd->it_td->td_proc);
	clk_ithd->it_td->td_proc->p_flag |= P_NOLOAD;
	PROC_UNLOCK(clk_ithd->it_td->td_proc);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt, however,
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
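
/*
 * Illustrative userland sketch (not part of this file): hw.intrnames is
 * an opaque block of NUL-terminated strings, so it can be read and
 * printed with sysctlbyname(3) using the usual two-call pattern (size
 * query first, then the data fetch):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		char *buf, *p;
 *		size_t len;
 *
 *		if (sysctlbyname("hw.intrnames", NULL, &len, NULL, 0) == -1)
 *			return (1);
 *		if ((buf = malloc(len)) == NULL ||
 *		    sysctlbyname("hw.intrnames", buf, &len, NULL, 0) == -1)
 *			return (1);
 *		for (p = buf; p < buf + len; p += strlen(p) + 1)
 *			printf("%s\n", p);
 *		free(buf);
 *		return (0);
 *	}
 */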