/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004,2005 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * $FreeBSD: releng/8.0/sys/i386/include/xen/xenpmap.h 190627 2009-04-01 17:06:28Z dfr $
 */

#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_
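
/*
 * Helpers for the Xen/i386 paravirtualized pmap.  Page-table updates are
 * either queued for the hypervisor (xen_queue_pt_update()/xen_flush_queue())
 * or written directly when WRITABLE_PAGETABLES is enabled, and the
 * xpmap_mtop()/xpmap_ptom() inlines below translate between machine and
 * pseudo-physical addresses.
 */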
void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int);
void xen_pt_switch(vm_paddr_t);
void xen_set_ldt(vm_paddr_t, unsigned long);
void xen_pgdpt_pin(vm_paddr_t);
void xen_pgd_pin(vm_paddr_t);
void xen_pgd_unpin(vm_paddr_t);
void xen_pt_pin(vm_paddr_t);
void xen_pt_unpin(vm_paddr_t);
void xen_flush_queue(void);
void pmap_ref(pt_entry_t *pte, vm_paddr_t ma);
void pmap_suspend(void);
void pmap_resume(void);
void xen_check_queue(void);

#ifdef INVARIANTS
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__)
#else
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0)
#endif
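
/*
 * Under INVARIANTS each queued update records the file and line of its
 * caller, so a bad entry can be traced back to where it was queued.
 */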


#include <sys/param.h>
#include <sys/pcpu.h>

#ifdef PMAP_DEBUG
#define PMAP_REF pmap_ref
#define PMAP_DEC_REF_PAGE pmap_dec_ref_page
#define PMAP_MARK_PRIV pmap_mark_privileged
#define PMAP_MARK_UNPRIV pmap_mark_unprivileged
#else
#define PMAP_MARK_PRIV(a)
#define PMAP_MARK_UNPRIV(a)
#define PMAP_REF(a, b)
#define PMAP_DEC_REF_PAGE(a)
#endif

#define ALWAYS_SYNC 0

#ifdef PT_DEBUG
#define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__)
#else
#define PT_LOG()
#endif

#define INVALID_P2M_ENTRY	(~0UL)

#define pmap_valid_entry(E)	((E) & PG_V) /* is PDE or PTE valid? */

#define SH_PD_SET_VA		1
#define SH_PD_SET_VA_MA		2
#define SH_PD_SET_VA_CLEAR	3

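/*
 * pd_set(), implemented elsewhere in the pmap code, writes the page-directory
 * entry at ptepindex for the given pmap.  The type argument says how val
 * should be interpreted: SH_PD_SET_VA for a physical address,
 * SH_PD_SET_VA_MA for a value that is already a machine address, and
 * SH_PD_SET_VA_CLEAR to clear the entry.
 */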
struct pmap;
void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type);
#ifdef notyet
static vm_paddr_t
vptetomachpte(vm_paddr_t *pte)
{
	vm_offset_t offset, ppte;
	vm_paddr_t pgoffset, retval, *pdir_shadow_ptr;
	int pgindex;

	ppte = (vm_offset_t)pte;
	pgoffset = (ppte & PAGE_MASK);
	offset = ppte - (vm_offset_t)PTmap;
	pgindex = ppte >> PDRSHIFT;

	pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow);
	retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset;
	return (retval);
}
#endif
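/*
 * PT_GET() reads a page-table entry and returns its physical-address view,
 * or 0 if the entry is not valid.
 */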
#define PT_GET(_ptp) \
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))

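/*
 * Two flavours of the update macros follow.  With WRITABLE_PAGETABLES the
 * kernel stores to page-table entries directly and relies on Xen to
 * validate the writes; otherwise every store becomes a queued hypervisor
 * update that is only pushed out by xen_flush_queue(), or immediately when
 * the sync argument is true.
 */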
#ifdef WRITABLE_PAGETABLES

#define PT_SET_VA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	PT_LOG(); \
	*(_ptp) = xpmap_ptom((_npte)); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	PT_LOG(); \
	*(_ptp) = (_npte); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	PT_LOG(); \
	*(_ptp) = 0; \
} while (/*CONSTCOND*/0)

#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)

#else /* !WRITABLE_PAGETABLES */

#define PT_SET_VA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	xen_queue_pt_update(vtomach(_ptp), \
	    xpmap_ptom(_npte)); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	xen_queue_pt_update(vtomach(_ptp), _npte); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	xen_queue_pt_update(vtomach(_ptp), 0); \
	if (sync || ALWAYS_SYNC) \
		xen_flush_queue(); \
} while (/*CONSTCOND*/0)

#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)

#endif /* WRITABLE_PAGETABLES */

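/*
 * Illustrative sketch only (not taken from the pmap code; pte1/pa1 and
 * friends are made-up names): with the queued variants above, several
 * updates can be batched and pushed to the hypervisor at once:
 *
 *	PT_SET_VA(pte1, pa1, FALSE);
 *	PT_SET_VA(pte2, pa2, FALSE);
 *	PT_UPDATES_FLUSH();
 *
 * PT_SET_MA() below is different: it bypasses the queue and asks the
 * hypervisor to install the machine address immediately, invalidating the
 * TLB entry on all CPUs.
 */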
#define PT_SET_MA(_va, _ma) \
do { \
	PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)), \
	    (_ma), \
	    UVMF_INVLPG | UVMF_ALL) < 0); \
} while (/*CONSTCOND*/0)

#define PT_UPDATES_FLUSH() do { \
	xen_flush_queue(); \
} while (/*CONSTCOND*/0)

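/*
 * Translate between machine addresses (used by the hypervisor and the real
 * page tables) and pseudo-physical addresses (used by the rest of the
 * kernel).  Only the page-frame bits are translated; the low flag bits are
 * carried over unchanged.
 */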
static __inline vm_paddr_t
xpmap_mtop(vm_paddr_t mpa)
{
	vm_paddr_t tmp = (mpa & PG_FRAME);

	return machtophys(tmp) | (mpa & ~PG_FRAME);
}

static __inline vm_paddr_t
xpmap_ptom(vm_paddr_t ppa)
{
	vm_paddr_t tmp = (ppa & PG_FRAME);

	return phystomach(tmp) | (ppa & ~PG_FRAME);
}

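/*
 * set_phys_to_machine() records which machine frame backs a pseudo-physical
 * page.  Auto-translated guests do not maintain this table, so the store is
 * skipped when XENFEAT_auto_translated_physmap is set.
 * phys_to_machine_mapping_valid() reports whether a pfn currently has a
 * machine frame assigned.
 */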
static __inline void
set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
#ifdef notyet
	PANIC_IF(max_mapnr && pfn >= max_mapnr);
#endif
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef notyet
		PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY));
#endif
		return;
	}
	xen_phys_machine[pfn] = mfn;
}

static __inline int
phys_to_machine_mapping_valid(unsigned long pfn)
{
	return xen_phys_machine[pfn] != INVALID_P2M_ENTRY;
}


#endif /* _XEN_XENPMAP_H_ */