1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1997 - 2000, 2001 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 */
10 #ifndef _ASM_HARDIRQ_H
11 #define _ASM_HARDIRQ_H
12
13 #include <linux/config.h>
14 #include <linux/threads.h>
15 #include <linux/irq.h>
16
/*
 * Per-CPU interrupt bookkeeping.  The double-underscore field names are
 * required by the generic accessor macros in <linux/irq_cpustat.h>,
 * which is included right below this definition.
 */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __local_irq_count;	/* hard-IRQ nesting depth; see irq_enter()/irq_exit() */
	unsigned int __local_bh_count;	/* bottom-half nesting depth */
	unsigned int __syscall_count;
	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
} ____cacheline_aligned irq_cpustat_t;	/* cacheline-aligned to keep per-CPU entries from sharing lines */
24
25 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
26
/*
 * Are we in an interrupt context? Either doing bottom half
 * or hardware interrupt processing?
 * (Nonzero when either per-CPU nesting counter is nonzero.)
 */
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
/* True only while this CPU is servicing a hardware interrupt. */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
34
35 #ifndef CONFIG_SMP
36
/* UP: no global IRQ lock exists, so the "lock" succeeds unless we are
 * already nested inside a hard interrupt on this (only) CPU. */
#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu)	do { } while (0)

/* UP: entering/leaving a hard interrupt just tracks the nesting depth. */
#define irq_enter(cpu, irq)	(local_irq_count(cpu)++)
#define irq_exit(cpu, irq)	(local_irq_count(cpu)--)
42
43 #define synchronize_irq() barrier();
44
45 #else
46
47 #include <asm/atomic.h>
48 #include <linux/spinlock.h>
49 #include <asm/smp.h>
50
51 extern int global_irq_holder;
52 extern spinlock_t global_irq_lock;
53
54 static inline int irqs_running (void)
55 {
56 int i;
57
58 for (i = 0; i < smp_num_cpus; i++)
59 if (local_irq_count(i))
60 return 1;
61 return 0;
62 }
63
64 static inline void release_irqlock(int cpu)
65 {
66 /* if we didn't own the irq lock, just ignore.. */
67 if (global_irq_holder == cpu) {
68 global_irq_holder = NO_PROC_ID;
69 spin_unlock(&global_irq_lock);
70 }
71 }
72
73 static inline int hardirq_trylock(int cpu)
74 {
75 return !local_irq_count(cpu) && !spin_is_locked(&global_irq_lock);
76 }
77
78 #define hardirq_endlock(cpu) do { } while (0)
79
80 static inline void irq_enter(int cpu, int irq)
81 {
82 ++local_irq_count(cpu);
83
84 while (spin_is_locked(&global_irq_lock))
85 barrier();
86 }
87
88 static inline void irq_exit(int cpu, int irq)
89 {
90 --local_irq_count(cpu);
91 }
92
93 extern void synchronize_irq(void);
94
95 #endif /* CONFIG_SMP */
96
97 #endif /* _ASM_HARDIRQ_H */