/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * Copyright (C) 1996 Linus Torvalds
 * Copyright (C) 1999 Andrea Arcangeli
 * Copyright (C) 1999, 2001, 2002 Ralf Baechle
 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H

#include <linux/config.h>
#include <linux/errno.h>

/*
 * Raw, non-atomic accessors for an atomic_t's counter.  The fallback
 * (!CONFIG_CPU_HAS_LLSC / !CONFIG_CPU_HAS_LLDSCD) paths below only use
 * these with interrupts disabled (local_irq_save/restore), which is what
 * makes the read-modify-write sequences safe there.
 */
#define sem_read(a) ((a)->counter)	/* plain read of the counter */
#define sem_inc(a) (((a)->counter)++)	/* non-atomic increment */
#define sem_dec(a) (((a)->counter)--)	/* non-atomic decrement */
/*
 * These two _must_ execute atomically wrt each other.
 *
 * wake_one_more() publishes one wakeup by bumping sem->waking; a waiter
 * later consumes it in waking_non_zero().  atomic_inc() keeps the
 * increment safe against a concurrent consumer on another CPU.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}

#ifdef CONFIG_CPU_HAS_LLSC

29 static inline int waking_non_zero(struct semaphore *sem)
30 {
31 int ret, tmp;
32
33 __asm__ __volatile__(
34 "1:\tll\t%1, %2\t\t\t# waking_non_zero\n\t"
35 "blez\t%1, 2f\n\t"
36 "subu\t%0, %1, 1\n\t"
37 "sc\t%0, %2\n\t"
38 "beqz\t%0, 1b\n"
39 "2:"
40 : "=r" (ret), "=r" (tmp), "+m" (sem->waking)
41 : "" (0));
42
43 return ret;
44 }

#else /* !CONFIG_CPU_HAS_LLSC */

48 /*
49 * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
50 * Do it once and that's it. ll/sc *has* it's advantages. HK
51 */
52
53 static inline int waking_non_zero(struct semaphore *sem)
54 {
55 unsigned long flags;
56 int ret = 0;
57
58 local_irq_save(flags);
59 if (sem_read(&sem->waking) > 0) {
60 sem_dec(&sem->waking);
61 ret = 1;
62 }
63 local_irq_restore(flags);
64 return ret;
65 }
#endif /* !CONFIG_CPU_HAS_LLSC */

#ifdef CONFIG_CPU_HAS_LLDSCD

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
 *
 * This is crazy.  Normally it's strictly forbidden to use 64-bit operations
 * in the 32-bit MIPS kernel.  In this case it's however ok because if an
 * interrupt has destroyed the upper half of registers sc will fail.
 * Note also that this will not work for MIPS32 CPUs!
 *
 * Pseudocode:
 *
 * If(sem->waking > 0) {
 *	Decrement(sem->waking)
 *	Return(SUCCESS)
 * } else If(signal_pending(tsk)) {
 *	Increment(sem->count)
 *	Return(-EINTR)
 * } else {
 *	Return(SLEEP)
 * }
 */

/*
 * Implements the pseudocode above in a single lld/scd pass over the two
 * adjacent 32-bit words of *sem (hence ".set mips3"; see the warning
 * above about 64-bit operations in the 32-bit kernel).
 *
 * Return value (in %0/ret):
 *	1	took a waking token -- the word tested via the sll
 *		(sem->waking per the pseudocode) was > 0 and is
 *		decremented by the daddiu
 *	0	no token, no signal -- caller should sleep
 *	-EINTR	signal pending -- the other 32-bit word (sem->count per
 *		the pseudocode) is re-incremented via the
 *		0x0000000100000000 add, undoing down_interruptible's
 *		decrement in the same atomic doubleword store
 *
 * %3 is signal_pending(tsk), evaluated once before the asm.  $1 is the
 * assembler temporary, hence ".set noat".  If the scd fails -- including
 * when an interrupt clobbered the upper register halves -- we retry
 * from label 0:.
 */
static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
	long ret, tmp;

	__asm__ __volatile__(
	".set\tpush\t\t\t# waking_non_zero_interruptible\n\t"
	".set\tmips3\n\t"
	".set\tnoat\n"
	"0:\tlld\t%1, %2\n\t"
	"li\t%0, 0\n\t"
	"sll\t$1, %1, 0\n\t"
	"blez\t$1, 1f\n\t"
	"daddiu\t%1, %1, -1\n\t"
	"li\t%0, 1\n\t"
	"b\t2f\n"
	"1:\tbeqz\t%3, 2f\n\t"
	"li\t%0, %4\n\t"
	"dli\t$1, 0x0000000100000000\n\t"
	"daddu\t%1, %1, $1\n"
	"2:\tscd\t%1, %2\n\t"
	"beqz\t%1, 0b\n\t"
	".set\tpop"
	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
	: "r" (signal_pending(tsk)), "i" (-EINTR));

	return ret;
}

/*
 * waking_non_zero_trylock is unused. we do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 */

static inline int waking_non_zero_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	/* Stub (see comment above); only the magic check runs, and only
	   when WAITQUEUE_DEBUG is set. */
	return 0;
}

#else /* !CONFIG_CPU_HAS_LLDSCD */

145 static inline int waking_non_zero_interruptible(struct semaphore *sem,
146 struct task_struct *tsk)
147 {
148 int ret = 0;
149 unsigned long flags;
150
151 local_irq_save(flags);
152 if (sem_read(&sem->waking) > 0) {
153 sem_dec(&sem->waking);
154 ret = 1;
155 } else if (signal_pending(tsk)) {
156 sem_inc(&sem->count);
157 ret = -EINTR;
158 }
159 local_irq_restore(flags);
160 return ret;
161 }
162
163 static inline int waking_non_zero_trylock(struct semaphore *sem)
164 {
165 int ret = 1;
166 unsigned long flags;
167
168 local_irq_save(flags);
169 if (sem_read(&sem->waking) <= 0)
170 sem_inc(&sem->count);
171 else {
172 sem_dec(&sem->waking);
173 ret = 0;
174 }
175 local_irq_restore(flags);
176
177 return ret;
178 }

#endif /* !CONFIG_CPU_HAS_LLDSCD */

#endif /* _ASM_SEMAPHORE_HELPER_H */