/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/clock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <isa/rtc.h>
#include <machine/fpu.h>
#include <machine/efi.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

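/*
 * State for the EFI runtime 1:1 map: the PML4 root and the page
 * holding it, the VM object backing all page table pages, and the
 * next free page index in that object.
 */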
static pml4_entry_t *efi_pml4;
static vm_object_t obj_1t1_pt;
static vm_page_t efi_pml4_page;
static vm_pindex_t efi_1t1_idx;

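/*
 * Tear down the 1:1 map: unwire all page table pages and release the
 * backing object, which frees the pages.
 */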
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->wire_count = 0;
		atomic_subtract_int(&vm_cnt.v_wire_count,
		    obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_pml4 = NULL;
	efi_pml4_page = NULL;
}

/*
 * Map a physical address from EFI runtime space into KVA space.  Returns
 * 0 to indicate a failed mapping so that the caller may handle the error.
 */
vm_offset_t
efi_phys_to_kva(vm_paddr_t paddr)
{

	if (paddr >= dmaplimit)
		return (0);
	return (PHYS_TO_DMAP(paddr));
}

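/*
 * Allocate the next page table page from the backing object:
 * zero-filled, wired, and left unbusied.
 */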
static vm_page_t
efi_1t1_page(void)
{

	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}

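/*
 * Walk the 1:1 page table down to the PTE for va, allocating any
 * missing intermediate page table pages along the way.
 */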
static pt_entry_t *
efi_1t1_pte(vm_offset_t va)
{
	pml4_entry_t *pml4e;
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_page_t m;
	vm_pindex_t pml4_idx, pdp_idx, pd_idx;
	vm_paddr_t mphys;

	pml4_idx = pmap_pml4e_index(va);
	pml4e = &efi_pml4[pml4_idx];
	if (*pml4e == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*pml4e = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pml4e & ~PAGE_MASK;
	}

	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys);
	pdp_idx = pmap_pdpe_index(va);
	pdpe += pdp_idx;
	if (*pdpe == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*pdpe = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pdpe & ~PAGE_MASK;
	}

	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	pd_idx = pmap_pde_index(va);
	pde += pd_idx;
	if (*pde == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*pde = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pde & ~PAGE_MASK;
	}

	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	pte += pmap_pte_index(va);
	KASSERT(*pte == 0, ("va %#jx *pt %#jx", (uintmax_t)va,
	    (uintmax_t)*pte));

	return (pte);
}

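/*
 * Build the 1:1 physical-to-virtual map covering every EFI runtime
 * segment in the memory map.  Returns false, after tearing the map
 * down, if any descriptor is unsuitable.
 */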
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *pte;
	vm_offset_t va;
	uint64_t idx;
	int bits, i, mode;

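	/*
	 * Size the backing object for the worst case: one PML4 page
	 * plus every possible PDP, PD and PT page.
	 */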
	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, ptoa(1 +
	    NPML4EPG + NPML4EPG * NPDPEPG + NPML4EPG * NPDPEPG * NPDEPG),
	    VM_PROT_ALL, 0, NULL);
	efi_1t1_idx = 0;
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_pml4_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_pml4_page));
	pmap_pinit_pml4(efi_pml4_page);

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not mappable for RT: "
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
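		/*
		 * Translate the EFI caching attribute into the closest
		 * x86 memory attribute, falling back to uncacheable.
		 */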
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_WP) != 0)
			mode = VM_MEMATTR_WRITE_PROTECTED;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}
		bits = pmap_cache_bits(kernel_pmap, mode, FALSE) | X86_PG_RW |
		    X86_PG_V;
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			pte = efi_1t1_pte(va);
			pte_store(pte, va | bits);
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);

fail:
	efi_destroy_1t1_map();
	return (false);
}

/*
 * Create an environment for the EFI runtime code call.  The most
 * important part is creating the required 1:1 physical->virtual
 * mappings for the runtime segments.  To do that, we manually create
 * a page table which unmaps userspace but gives the correct kernel
 * mappings.  The 1:1 mappings for runtime segments usually occupy the
 * low 4G of the physical address space.
 *
 * The 1:1 mappings were chosen over the SetVirtualAddressMap() EFI RT
 * service, because some firmware fails to correctly relocate itself
 * on the call and requires both the 1:1 and the virtual mapping.  As
 * a result, we must provide the 1:1 mapping anyway, so there is no
 * reason to bother with the virtual map, and no need to add complexity
 * to the loader.
 *
 * The fpu_kern_enter() call allows the firmware to use the FPU, as
 * mandated by the specification.  In particular, the CR0.TS bit is
 * cleared.  It also enters a critical section, giving us the necessary
 * protection against context switches.
 *
 * There is no need to disable interrupts around the change of %cr3:
 * the kernel mappings remain correct, and we only replaced the
 * userspace portion of the VA.  Interrupt handlers must not access
 * userspace anyway.  Having interrupts enabled also avoids the problem
 * of long firmware/SMM operations blocking IPIs, esp. TLB shootdown
 * requests.
 */
int
efi_arch_enter(void)
{
	pmap_t curpmap;

	curpmap = PCPU_GET(curpmap);
	PMAP_LOCK_ASSERT(curpmap, MA_OWNED);

	/*
	 * The IPI TLB shootdown handler invltlb_pcid_handler() reloads
	 * %cr3 from curpmap->pm_cr3, which would disable the runtime
	 * segments mappings.  Block the handler's action by setting
	 * curpmap to an impossible value.  See also the comment in
	 * pmap.c:pmap_activate_sw().
	 */
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, NULL);

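	/*
	 * Switch to the EFI page table, reusing the current pmap's
	 * PCID when PCID is enabled.
	 */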
	load_cr3(VM_PAGE_TO_PHYS(efi_pml4_page) | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	/*
	 * If PCID is enabled, the cleared CR3_PCID_SAVE bit in the
	 * loaded %cr3 causes TLB invalidation for the PCID, so an
	 * explicit flush is only needed otherwise.
	 */
	if (!pmap_pcid_enabled)
		invltlb();
	return (0);
}

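/*
 * Restore the process's page table and undo the curpmap override set
 * up by efi_arch_enter().
 */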
void
efi_arch_leave(void)
{
	pmap_t curpmap;

	curpmap = &curproc->p_vmspace->vm_pmap;
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, curpmap);
	load_cr3(curpmap->pm_cr3 | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	if (!pmap_pcid_enabled)
		invltlb();
}

/* XXX debug stuff */
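/*
 * Writing any value to the debug.efi_time sysctl calls the EFI
 * GetTime() runtime service and prints the result to the controlling
 * terminal.
 */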
static int
efi_time_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct efi_tm tm;
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = efi_get_time(&tm);
	if (error == 0) {
		uprintf("EFI reports: Year %d Month %d Day %d Hour %d Min %d "
		    "Sec %d\n", tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
		    tm.tm_min, tm.tm_sec);
	}
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, efi_time, CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
    efi_time_sysctl_handler, "I", "Test the EFI GetTime() runtime service");