/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#define likely(x)   __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)

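/*
 * Illustrative use only: annotate the branch direction the caller expects to
 * dominate so the compiler keeps the common case as straight-line code, e.g.
 *
 *     if (unlikely(error != 0))
 *         return (error);
 */
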
#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
    if (likely(gdtset))
        return PCPU_GET(cpuid);
    return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL (void *)0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) do { if (unlikely(exp)) { printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__); } } while (0)
#endif
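
/*
 * Illustrative use only: treat a "cannot happen" condition as fatal, e.g.
 *
 *     PANIC_IF(HYPERVISOR_shared_info == NULL);
 */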

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
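
/*
 * Illustrative use only (the "flag" variable is made up): drop cpu_relax()
 * into a busy-wait loop so the spinning CPU yields pipeline resources, e.g.
 *
 *     extern volatile int flag;
 *
 *     while (flag == 0)
 *         cpu_relax();
 */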

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, we implemented
 * a mechanism by which the user can annotate likely branch directions and
 * expect the blocks to be reordered appropriately.  Define __builtin_expect
 * to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define per_cpu(var, cpu)  (pcpu_find((cpu))->pc_ ## var)

/*
 * Crude memory allocator for allocations made early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
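
/*
 * Illustrative use only, assuming bootmem_alloc() hands back "size" bytes
 * that are later released with bootmem_free() using the same size:
 *
 *     void *buf = bootmem_alloc(PAGE_SIZE);
 *     (use buf during early boot)
 *     bootmem_free(buf, PAGE_SIZE);
 */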


/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* Some function prototypes. */
void trap_init(void);

#ifndef XENHVM

/*
 * STI/CLI equivalents. These basically set and clear the per-vcpu event
 * mask (evtchn_upcall_mask) in the shared_info structure. Note that when
 * the mask is cleared, there may be pending events to be handled. We may
 * therefore call into do_hypervisor_callback() directly.
 */


#define __cli()                                                         \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
    _vcpu->evtchn_upcall_mask = 1;                                      \
    barrier();                                                          \
} while (0)

#define __sti()                                                         \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    barrier();                                                          \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
    _vcpu->evtchn_upcall_mask = 0;                                      \
    barrier(); /* unmask then check (avoid races) */                    \
    if (unlikely(_vcpu->evtchn_upcall_pending))                         \
        force_evtchn_callback();                                        \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    barrier();                                                          \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                       \
        barrier(); /* unmask then check (avoid races) */                \
        if (unlikely(_vcpu->evtchn_upcall_pending))                     \
            force_evtchn_callback();                                    \
    }                                                                   \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
    (x) = _vcpu->evtchn_upcall_mask;                                    \
    _vcpu->evtchn_upcall_mask = 1;                                      \
    barrier();                                                          \
} while (0)

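/*
 * __save_flags() is referenced by save_flags() below but is not defined in
 * this file; the following is a minimal reconstruction (an assumption based
 * on __save_and_cli() above) that only snapshots the current upcall mask
 * without modifying it.
 */
#define __save_flags(x)                                                 \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
    (x) = _vcpu->evtchn_upcall_mask;                                    \
} while (0)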

#define cli()                   __cli()
#define sti()                   __sti()
#define save_flags(x)           __save_flags(x)
#define restore_flags(x)        __restore_flags(x)
#define save_and_cli(x)         __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x)       do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x)  do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave               mtx_lock_irqsave
#define spin_unlock_irqrestore          mtx_unlock_irqrestore
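
/*
 * Illustrative use only ("pending_events" is a made-up variable): bracket a
 * short critical section so the event-channel upcall cannot run in between:
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);
 *     pending_events++;
 *     local_irq_restore(flags);
 */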

#endif /* !XENHVM */

#ifdef SMP
#define smp_mb()                    mb()
#define smp_rmb()                   rmb()
#define smp_wmb()                   wmb()
#define smp_read_barrier_depends()  read_barrier_depends()
#define set_mb(var, value)          do { xchg(&var, value); } while (0)
#else
#define smp_mb()                    barrier()
#define smp_rmb()                   barrier()
#define smp_wmb()                   barrier()
#define smp_read_barrier_depends()  do { } while (0)
#define set_mb(var, value)          do { var = value; barrier(); } while (0)
#endif
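
/*
 * Illustrative use only ("data" and "ready" are made-up variables shared
 * between CPUs): pair smp_wmb() on the producer with smp_rmb() on the
 * consumer so "data" is globally visible before "ready":
 *
 *     producer:   data = 42; smp_wmb(); ready = 1;
 *     consumer:   while (ready == 0) cpu_relax(); smp_rmb(); use data;
 */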


/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xen_xchg(ptr, v)                                                \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                     int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
            :"=q" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    }
    return x;
}
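
/*
 * Illustrative use only ("pending_flag" and "handle_pending" are made up):
 * atomically swap in a new value and act on the old one, e.g. consume and
 * clear a flag in one step:
 *
 *     unsigned long old;
 *
 *     old = xen_xchg(&pending_flag, 0);
 *     if (old != 0)
 *         handle_pending();
 */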

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"Ir" (nr) : "memory");
    return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr, addr)                      \
    (__builtin_constant_p(nr) ?                 \
     constant_test_bit((nr), (addr)) :          \
     variable_test_bit((nr), (addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btsl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btrl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}
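
/*
 * Illustrative use only ("pending_mask" is a made-up bitmap word): set,
 * test and atomically claim a single bit:
 *
 *     static unsigned long pending_mask;
 *
 *     set_bit(3, &pending_mask);
 *     if (test_and_clear_bit(3, &pending_mask))
 *         printk("bit 3 was pending\n");
 */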

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (v->counter)
        :"m" (v->counter));
}
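
/*
 * Illustrative use only ("event_count" is a made-up counter): bump a shared
 * counter without taking a lock:
 *
 *     static atomic_t event_count;
 *
 *     atomic_inc(&event_count);
 */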


#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

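/*
 * Illustrative use only ("do_work" is a made-up function being timed): read
 * the 64-bit time-stamp counter; the "=A" constraint stores EDX:EAX straight
 * into a 64-bit lvalue:
 *
 *     uint64_t before, after;
 *
 *     rdtscll(before);
 *     do_work();
 *     rdtscll(after);
 */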


/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a valid
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
    return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}
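
/*
 * Illustrative use only ("lookup_thing" is a made-up function): return either
 * a valid pointer or a small negative error code through the same value:
 *
 *     void *p = lookup_thing();
 *
 *     if (IS_ERR(p))
 *         return (PTR_ERR(p));
 */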

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */