FreeBSD/Linux Kernel Cross Reference
sys/alphapc/mmu.c
1 #include "u.h"
2 #include "../port/lib.h"
3 #include "mem.h"
4 #include "dat.h"
5 #include "fns.h"
6 #include "/sys/src/boot/alphapc/conf.h"
7
static uvlong origlvl1;		/* physical address of the boot loader's level-1 (top) page table */
static uvlong klvl2;		/* physical, as created by boot loader: kernel level-2 page table */
static uchar *nextio;		/* next virtual address to be allocated by kmapv; only ever advances */
extern Bootconf *bootconf;

/* index of va's PTE within a level-2 (middle) and level-3 (bottom) page-table page */
#define LVL2OFF(v)	((((long)(v))>>(2*PGSHIFT-3))&(PTE2PG-1))
#define LVL3OFF(v)	((((long)(v))>>(PGSHIFT))&(PTE2PG-1))
15
/*
 * Point the processor at a new top-level page table: store its
 * page frame number in this Mach's PCB and switch context so the
 * hardware picks it up.  Note ptbr holds a PFN, not a byte address.
 */
static void
setptb(ulong pa)
{
	m->ptbr = (uvlong)pa>>PGSHIFT;
	swpctx(m);
}
22
/*
 * Take over the MMU state built by the boot loader: adopt its
 * level-1 table as the prototype kernel map, locate the kernel's
 * level-2 table through the last level-1 entry, and start kmapv's
 * I/O virtual-address allocation just above physical memory.
 */
void
mmuinit(void)
{
	uvlong *plvl2;

	/* set PCB to new one in mach structure before stomping on old one */
	m->usp = 0;
	m->fen = 1;		/* floating-point enable bit in the PCB */
	m->ptbr = bootconf->pcb->ptbr;
	origlvl1 = (m->ptbr << PGSHIFT);	/* ptbr is a PFN; convert to a physical address */
	setpcb(m);

	/* the last entry of the level-1 table points at the kernel's level-2 table */
	plvl2 = (uvlong*) (KZERO|origlvl1|(BY2PG-8));
	klvl2 = (*plvl2 >> 32)<<PGSHIFT;	/* a PTE keeps its PFN in the upper 32 bits */

	/* hand out kmapv virtual space starting just past the top of physical memory */
	nextio = (uchar*) (KZERO|bootconf->maxphys);
}
40
41 static void
42 mmuptefree(Proc* proc)
43 {
44 uvlong *lvl2;
45 Page **last, *page;
46
47 if(proc->mmutop && proc->mmuused){
48 lvl2 = (uvlong*)proc->mmulvl2->va;
49 last = &proc->mmuused;
50 for(page = *last; page; page = page->next){
51 lvl2[page->daddr] = 0;
52 last = &page->next;
53 }
54 *last = proc->mmufree;
55 proc->mmufree = proc->mmuused;
56 proc->mmuused = 0;
57 }
58 }
59
/*
 * Install proc's address space on this processor (called at
 * context switch).  If newtlb is set the process's user PTEs
 * are discarded first, forcing them to be rebuilt by putmmu.
 */
void
mmuswitch(Proc *proc)
{
	if(proc->newtlb){
		mmuptefree(proc);	/* clear and recycle the user page-table pages */
		proc->newtlb = 0;
	}

	/* tell processor about new page table and flush cached entries */
	if(proc->mmutop == 0)
		setptb(origlvl1);	/* no user map yet: run on the prototype kernel map */
	else
		setptb(proc->mmutop->pa);
	tlbflush(-1, 0);
	icflush();
}
76
/*
 * point to prototype page map: detach from any per-process page
 * table by reloading the boot loader's level-1 table, so the
 * process's own table pages may be freed safely (see mmurelease).
 */
void
mmupark(void)
{
	setptb(origlvl1);
	icflush();
}
84
/*
 * give all page table pages back to the free pool. This is called in sched()
 * with palloc locked.
 */
void
mmurelease(Proc *proc)
{
	Page *page, *next;

	mmupark();		/* stop using proc's tables before freeing them */
	mmuptefree(proc);	/* moves mmuused pages onto mmufree */
	proc->mmuused = 0;
	if(proc->mmutop) {
		proc->mmutop->next = proc->mmufree;
		proc->mmufree = proc->mmutop;
		proc->mmutop = 0;
	}
	if(proc->mmulvl2) {
		proc->mmulvl2->next = proc->mmufree;
		proc->mmufree = proc->mmulvl2;
		proc->mmulvl2 = 0;
	}
	/* return everything on mmufree to the palloc pool; each page must have ref 1 */
	for(page = proc->mmufree; page; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d\n", page->ref);
		pagechainhead(page);
	}
	if(proc->mmufree && palloc.r.p)
		wakeup(&palloc.r);	/* a process may be waiting for free pages */
	proc->mmufree = 0;
}
117
/*
 * Build a fresh page-table hierarchy for up: a level-1 (top) page
 * and one level-2 page for low user addresses.  Top-level layout:
 *	entry 0		-> the new level-2 page (user space)
 *	entry PTE2PG-2	-> the top page itself (self reference)
 *	entry PTE2PG-1	-> the kernel's level-2 table (kernel mappings)
 */
void
mmunewtop(void)
{
	Page *top, *lvl2;
	uvlong *ppte;

	top = newpage(1, 0, 0);
	top->va = VA(kmap(top));
	lvl2 = newpage(1, 0, 0);
	lvl2->va = VA(kmap(lvl2));

	ppte = (uvlong *)top->va;
	ppte[0] = PTEPFN(lvl2->pa) | PTEKVALID;
	ppte[PTE2PG-2] = PTEPFN(top->pa) | PTEKVALID;
	ppte[PTE2PG-1] = PTEPFN(klvl2) | PTEKVALID;

	up->mmutop = top;
	up->mmulvl2 = lvl2;
	setptb(top->pa);	/* switch to the new table and discard stale entries */
	tlbflush(-1, 0);
	icflush();
}
140
/*
 * Enter the user translation va -> pa into the current process's
 * page table, allocating a bottom-level table page if needed.
 * The incoming pg is never read; the parameter is reused only as
 * scratch for a newly allocated page-table page.
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
	int lvl2off;
	uvlong *lvl2, *pt;
	int s;

	if(up->mmutop == 0)
		mmunewtop();

	lvl2 = (uvlong*)up->mmulvl2->va;
	lvl2off = LVL2OFF(va);

	/*
	 * if bottom level page table missing, allocate one
	 * and point the top level page at it.
	 */
	s = splhi();
	if(lvl2[lvl2off] == 0){
		if(up->mmufree == 0){
			/* drop priority while allocating — newpage presumably can block */
			spllo();
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
			splhi();
		} else {
			/* reuse a page from the process's private free list */
			pg = up->mmufree;
			up->mmufree = pg->next;
			memset((void*)pg->va, 0, BY2PG);
		}
		lvl2[lvl2off] = PTEPFN(pg->pa) | PTEVALID;
		pg->daddr = lvl2off;	/* remember the slot so mmuptefree can clear it */
		pg->next = up->mmuused;
		up->mmuused = pg;
	}

	/*
	 * put in new mmu entry
	 */
	pt = (uvlong*)(((lvl2[lvl2off] >> 32)<<PGSHIFT)|KZERO);	/* PFN is in the PTE's upper 32 bits */
	pt[LVL3OFF(va)] = FIXPTE(pa);	/* FIXPTE converts the generic PTE bits in pa — defined elsewhere */

	/* flush cached mmu entries */
	tlbflush(3, va);
	icflush();
	splx(s);
}
187
/*
 * Map physical [pa, pa+size) into kernel virtual space and return
 * the virtual address.  Space is carved off nextio and never
 * reclaimed (see vunmap).  Entries go directly into the kernel's
 * level-2 table with PTEASM set, so the mappings are seen in every
 * address space.
 * NOTE(review): nextio is updated without locking — assumes callers
 * are serialized (e.g. boot-time device setup); verify.
 */
void *
kmapv(uvlong pa, int size)
{
	void *va, *new;
	int lvl2off, i, npage, offset;
	uvlong *lvl2, *pt;

	offset = pa&(BY2PG-1);	/* preserve pa's offset within its page */
	npage = ((size+offset+BY2PG-1)>>PGSHIFT);	/* whole pages needed, rounded up */

	va = nextio+offset;
	lvl2 = (uvlong*)(KZERO|klvl2);
	for (i = 0; i < npage; i++) {
		lvl2off = LVL2OFF(nextio);
		if (lvl2[lvl2off] == 0) {
			/* no bottom-level table covers this range yet: allocate one */
			new = xspanalloc(BY2PG, BY2PG, 0);
			memset(new, 0, BY2PG);
			lvl2[lvl2off] = PTEPFN(PADDR(new)) | PTEKVALID | PTEASM;
		}
		pt = (uvlong*)(((lvl2[lvl2off] >> 32)<<PGSHIFT)|KZERO);	/* PFN in upper 32 bits */
		pt[LVL3OFF(nextio)] = PTEPFN(pa) | PTEKVALID | PTEASM;
		nextio += BY2PG;
		pa += BY2PG;
	}
	return va;
}
214
/*
 * Discard and reload the current process's translations: setting
 * newtlb makes mmuswitch free the user page-table entries before
 * reinstalling the map and flushing the TLB.
 */
void
flushmmu(void)
{
	int s;

	s = splhi();	/* keep interrupts out while the map is switched */
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);

}
226
227 void*
228 vmap(ulong pa, int size)
229 {
230 void *va;
231
232 /*
233 * Viability hack. Only for PCI framebuffers.
234 */
235 if(pa == 0)
236 return 0;
237 va = kmapv(((uvlong)0x88<<32LL)|pa, size);
238 if(va == nil)
239 return 0;
240 return (void*)va;
241 }
242
/*
 * Unmapping is not implemented: kmapv only ever advances nextio,
 * so virtual space handed out by vmap is never reclaimed.
 */
void
vunmap(void*, int)
{
	print("vunmap: virtual mapping not freed\n");
}
248
/*
 * Debugging aid: print the hardware ptbr plus the current
 * process's top and level-2 page-table addresses, and the
 * top table's last entry (the kernel level-2 splice).
 */
void
mmudump(void)
{
	Page *top, *lvl2;

	iprint("ptbr %lux up %#p\n", (ulong)m->ptbr, up);
	if(up) {
		top = up->mmutop;
		if(top != nil)
			iprint("top %lux top[N-1] %llux\n", top->va, ((uvlong *)top->va)[PTE2PG-1]);
		lvl2 = up->mmulvl2;
		if(lvl2 != nil)
			iprint("lvl2 %lux\n", lvl2->va);
	}
}
264
/*
 * Stub: this port does no physical-address allocation;
 * always reports failure (0).
 */
ulong
upaalloc(int, int)
{
	return 0;
}
270
/* Stub: nothing to free, since upaalloc never allocates. */
void
upafree(ulong, int)
{
}
275
/* Stub: va/pa consistency checking is not implemented on this port. */
void
checkmmu(ulong, ulong)
{
}
280
/* Stub: page-reference accounting is not implemented on this port. */
void
countpagerefs(ulong*, int)
{
}
285
286 /*
287 * Return the number of bytes that can be accessed via KADDR(pa).
288 * If pa is not a valid argument to KADDR, return 0.
289 */
290 ulong
291 cankaddr(ulong pa)
292 {
293 ulong kzero;
294
295 kzero = -KZERO;
296 if(pa >= kzero)
297 return 0;
298 return kzero - pa;
299 }
Cache object: 015f6ba60327d3591b80b283ce482cf2
|