#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>

/*
 * System defines.  Note that this is included from both .c and .S
 * files, so it should contain only preprocessor defines, not any C code.
 */

/*
 * We leave one page for the initial stack, and one page for
 * the initial process structure.  Also, the console eats 3 MB for
 * the initial bootloader (one MB of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is set up by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the VM system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM			ZERO_PGE
#define COMMAND_LINE		((char*)(PARAM + 0x0000))
#define COMMAND_LINE_SIZE	256
#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
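
/*
 * Illustrative sketch, not part of this header: early platform setup
 * code is expected to copy these values out before the zero page is
 * reused.  saved_command_line, initrd_start and initrd_size below are
 * hypothetical names for whatever permanent storage that code uses.
 *
 *	static char saved_command_line[COMMAND_LINE_SIZE];
 *	static unsigned long initrd_start, initrd_size;
 *
 *	strncpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE - 1);
 *	initrd_start = INITRD_START;
 *	initrd_size  = INITRD_SIZE;
 */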

#ifndef __ASSEMBLY__
#include <linux/kernel.h>

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	int		sbz1	: 30;	/* should be zero */
	int		err2	: 1;	/* second error */
	int		retry	: 1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned int	code;		/* machine check code */
	unsigned int	frame_rev;	/* frame revision */
};
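
/*
 * Illustrative sketch only: a machine-check handler is typically handed
 * the address of the logout area and uses proc_offset/sys_offset to
 * locate the CPU- and system-specific frames that follow this header.
 * "la_ptr" stands in for that handler argument; nothing here is defined
 * by this header.
 *
 *	struct el_common *mchk = (struct el_common *) la_ptr;
 *	void *cpu_frame = (char *) mchk + mchk->proc_offset;
 *	void *sys_frame = (char *) mchk + mchk->sys_offset;
 *
 *	if (mchk->retry)
 *		... PALcode indicates the operation may be retried ...
 */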

/* Machine Check Frame for uncorrectable errors (Large format)
 *      --- This is used to log uncorrectable errors such as
 *          double bit ECC errors.
 *      --- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25 */
	unsigned long	paltemp[24];	/* PAL TEMP REGS. */
	unsigned long	exc_addr;	/* Address of excepting instruction */
	unsigned long	exc_sum;	/* Summary of arithmetic traps. */
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode. */
	unsigned long	isr;		/* Interrupt Status Reg. */
	unsigned long	icsr;		/* CURRENT SETUP OF EV5 IBOX */
	unsigned long	ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity
					   <12> set TAG parity */
	unsigned long	dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
					   <2> Data error in bank 0
					   <3> Data error in bank 1
					   <4> Tag error in bank 0
					   <5> Tag error in bank 1 */
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Holds the reason for D-stream
					   fault or D-cache parity errors */
	unsigned long	sc_addr;	/* Address that was being accessed
					   when EV5 detected Secondary cache
					   failure. */
	unsigned long	sc_stat;	/* Helps determine if the error was
					   TAG/Data parity (Secondary Cache) */
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR */
	unsigned long	ei_addr;	/* Physical address of any transfer
					   that is logged in EV5 EI_STAT */
	unsigned long	fill_syndrome;	/* For correcting ECC errors. */
	unsigned long	ei_stat;	/* Helps identify reason of any
					   processor uncorrectable error
					   at its external interface. */
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register */
};

struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last)			\
do {							\
	unsigned long pcbb;				\
	current = (next);				\
	pcbb = virt_to_phys(&current->thread);		\
	(last) = alpha_switch_to(pcbb, (prev));		\
	check_mmu_context();				\
} while (0)

extern struct task_struct* alpha_switch_to(unsigned long, struct task_struct*);
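
/*
 * Illustrative sketch only, not a definition from this header: the
 * scheduler core invokes switch_to() roughly like this when handing the
 * CPU from one task to another.  "prev", "next" and "last" stand in for
 * local variables in the caller.
 *
 *	struct task_struct *prev = current, *last;
 *
 *	prepare_to_switch();
 *	switch_to(prev, next, last);
 *	// Execution resumes here in the context of the next task; "last"
 *	// then names the task that was switched away from.
 */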

#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
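
/*
 * Illustrative sketch only ("data" and "flag" are hypothetical shared
 * variables, not part of this header): a producer publishes data with
 * wmb(), and a consumer pairs that with rmb() before reading it.
 *
 *	// producer
 *	data = value;
 *	wmb();
 *	flag = 1;
 *
 *	// consumer
 *	while (!flag)
 *		barrier();
 *	rmb();
 *	use(data);
 *
 * On UP kernels the smp_*() variants collapse to barrier(), which is
 * enough when the only other agent is the local CPU itself.
 */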

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
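
/*
 * Illustrative sketch only: amask() returns the bits of its argument
 * that the CPU does *not* implement, so a zero result means the feature
 * is present.  Code selecting a byte/word-capable routine (hypothetical
 * function names below) might do:
 *
 *	if (amask(AMASK_BWX) == 0)
 *		copy = memcpy_bwx;		// BWX available
 *	else
 *		copy = memcpy_generic;
 *
 *	if (implver() == IMPLVER_EV6)
 *		... EV6-specific tuning ...
 */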

#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		:"=r" (__r0)					\
		:"i" (PAL_ ## NAME)				\
		:"$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}

#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # "#NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), ""(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), ""(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), ""(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # "#NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), ""(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
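
/*
 * Illustrative sketch only: with the expansions above, the PALcode
 * wrappers read like ordinary functions.  For instance:
 *
 *	unsigned long cpu = whami();		// current CPU number
 *	unsigned long ps  = rdps();		// processor status (IPL in bits 0-2)
 *	unsigned long old = swpipl(7);		// raise IPL (IPL_MAX, defined below)
 *	...
 *	swpipl(old);				// restore the previous IPL
 */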

#define IPL_MIN		0
#define IPL_SW0		1
#define IPL_SW1		2
#define IPL_DEV0	3
#define IPL_DEV1	4
#define IPL_TIMER	5
#define IPL_PERF	6
#define IPL_POWERFAIL	6
#define IPL_MCHECK	7
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))

#define __cli()			do { setipl(IPL_MAX); barrier(); } while(0)
#define __sti()			do { barrier(); setipl(IPL_MIN); } while(0)
#define __save_flags(flags)	((flags) = rdps())
#define __save_and_cli(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define __save_and_sti(flags)	do { barrier(); (flags) = swpipl(IPL_MIN); } while(0)
#define __restore_flags(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_set(flags)		__save_and_sti(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
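
/*
 * Illustrative sketch only: the usual pattern for a short critical
 * section that must not be interrupted on the local CPU.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch per-CPU or interrupt-shared state ...
 *	local_irq_restore(flags);
 *
 * On SMP this protects only against local interrupts; cross-CPU
 * exclusion still needs a lock (or the global cli()/sti() below).
 */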

#ifdef CONFIG_SMP

extern int global_irq_holder;

#define save_and_cli(flags)	(save_flags(flags), cli())

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else /* CONFIG_SMP */

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(flags)	__save_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define restore_flags(flags)	__restore_flags(flags)

#endif /* CONFIG_SMP */

/*
 * TB (translation buffer) invalidation routines.
 */
#define __tbi(nr,arg,arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		:"=r" (__r16),"=r" (__r17)			\
		:"" (__r16),"i" (PAL_tbi) ,##arg1		\
		:"$0", "$1", "$22", "$23", "$24", "$25");	\
})

#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)
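
/*
 * Illustrative sketch only: after changing a page table entry for
 * "addr", the per-page translation buffer entry can be dropped with
 * tbis(); tbiap() drops all per-process (ASM=0) entries and tbia()
 * flushes everything.
 *
 *	... update the PTE for addr ...
 *	tbis(addr);		// invalidate I+D TB entries for that page
 */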

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

extern __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

extern __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)						\
({								\
	__typeof__(*(ptr)) _x_ = (x);				\
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))
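
/*
 * Illustrative sketch only: tas()/xchg() can build a crude test-and-set
 * lock ("lock" is a hypothetical int initialized to 0; the real spinlock
 * implementation lives elsewhere).
 *
 *	while (tas(&lock))		// xchg returns the old value; 0 means we got it
 *		while (lock)
 *			barrier();	// spin locally until it looks free
 *	... critical section ...
 *	mb();
 *	lock = 0;			// release
 */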


/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

extern __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

extern __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
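
/*
 * Illustrative sketch only: the classic lock-free update loop built on
 * cmpxchg().  "counter" is a hypothetical shared variable.
 *
 *	long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries whenever another CPU changed "counter" between the
 * read and the cmpxchg().
 */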

#endif /* __ASSEMBLY__ */

#endif