1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9 #ifndef _ASM_PAGE_H
10 #define _ASM_PAGE_H
11
12 #include <linux/config.h>
13
14 /* PAGE_SHIFT determines the page size */
15 #define PAGE_SHIFT 12
16 #define PAGE_SIZE (1UL << PAGE_SHIFT)
17 #define PAGE_MASK (~(PAGE_SIZE-1))
18
19 #ifdef __KERNEL__
20
21 #ifndef __ASSEMBLY__
22
23 #include <asm/cacheflush.h>
24
25 #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
26 #define PAGE_BUG(page) do { BUG(); } while (0)
27
28 extern void (*_clear_page)(void * page);
29 extern void (*_copy_page)(void * to, void * from);
30
31 #define clear_page(page) _clear_page(page)
32 #define copy_page(to, from) _copy_page(to, from)
33
34 extern unsigned long shm_align_mask;
35
36 static inline unsigned long pages_do_alias(unsigned long addr1,
37 unsigned long addr2)
38 {
39 return (addr1 ^ addr2) & shm_align_mask;
40 }
41
/*
 * Zero a page that is (or will be) mapped into user space at 'vaddr'.
 * On a virtually-indexed cache the kernel mapping of the page may alias
 * the user mapping; if so, flush the kernel alias after clearing so the
 * user view sees the zeroed data.
 */
static inline void clear_user_page(void *page, unsigned long vaddr)
{
	unsigned long kva = (unsigned long) page;

	clear_page(page);

	if (pages_do_alias(kva, vaddr))
		flush_data_cache_page(kva);
}
50
/*
 * Copy one page into another that is (or will be) mapped into user
 * space at 'vaddr'.  As with clear_user_page(), flush the kernel-side
 * alias of the destination when it can alias the user mapping, so the
 * copied data is visible through the user address.
 */
static inline void copy_user_page(void * to, void * from, unsigned long vaddr)
{
	unsigned long dst_kva = (unsigned long) to;

	copy_page(to, from);

	if (pages_do_alias(dst_kva, vaddr))
		flush_data_cache_page(dst_kva);
}
59
60 /*
61 * These are used to make use of C type-checking..
62 */
63 typedef struct { unsigned long pte; } pte_t;
64 typedef struct { unsigned long pmd; } pmd_t;
65 typedef struct { unsigned long pgd; } pgd_t;
66 typedef struct { unsigned long pgprot; } pgprot_t;
67
68 #define pte_val(x) ((x).pte)
69 #define pmd_val(x) ((x).pmd)
70 #define pgd_val(x) ((x).pgd)
71 #define pgprot_val(x) ((x).pgprot)
72
73 #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
74
75 #define __pte(x) ((pte_t) { (x) } )
76 #define __pmd(x) ((pmd_t) { (x) } )
77 #define __pgd(x) ((pgd_t) { (x) } )
78 #define __pgprot(x) ((pgprot_t) { (x) } )
79
80 /* Pure 2^n version of get_order */
81 extern __inline__ int get_order(unsigned long size)
82 {
83 int order;
84
85 size = (size-1) >> (PAGE_SHIFT-1);
86 order = -1;
87 do {
88 size >>= 1;
89 order++;
90 } while (size);
91 return order;
92 }
93
94 #endif /* !__ASSEMBLY__ */
95
96 /* to align the pointer to the (next) page boundary */
97 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
98
99 /*
100 * This handles the memory map.
101 * We handle pages at KSEG0 for kernels with upto 512mb of memory,
102 * at XKPHYS with a suitable caching mode for kernels with more than that.
103 */
104 #if defined(CONFIG_SGI_IP22) || defined(CONFIG_MIPS_ATLAS) || \
105 defined(CONFIG_MIPS_MALTA) || defined(CONFIG_MIPS_SEAD) || \
106 defined(CONFIG_DECSTATION)
107 #define PAGE_OFFSET 0xffffffff80000000UL
108 #define UNCAC_BASE 0xffffffffa0000000UL
109 #endif
110 #if defined(CONFIG_SGI_IP27)
111 #define PAGE_OFFSET 0xa800000000000000UL
112 #define UNCAC_BASE 0x9600000000000000UL
113 #endif
114 #if defined(CONFIG_SIBYTE_SB1xxx_SOC)
115 #define PAGE_OFFSET 0xa800000000000000UL
116 #endif
117
118 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
119 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
120 #ifndef CONFIG_DISCONTIGMEM
121 #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
122 #define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
123 #endif
124
125 #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
126 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
127
128 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
129 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
130
131 #endif /* defined (__KERNEL__) */
132
133 #endif /* _ASM_PAGE_H */
/* Cache object: fb915987e92759f124c3cf4087b52c5f */