FreeBSD/Linux Kernel Cross Reference
sys/vm/phys_pager.c
/*-
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* prevent concurrent creation races */
static int phys_pager_alloc_lock;
/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff)
{
        vm_object_t object;
        vm_pindex_t pindex;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

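        /*
         * Round the mapping up to a whole number of pages: foff is already
         * page aligned, so OFF_TO_IDX(foff + PAGE_MASK + size) is the end
         * offset rounded up to the next page boundary, expressed as a page
         * index.  That index becomes the size of the backing VM object.
         */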
        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

        if (handle != NULL) {
                mtx_lock(&Giant);
                /*
                 * Lock to prevent object creation race condition.
                 */
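                /*
                 * phys_pager_alloc_lock is a simple sleep lock manipulated
                 * under Giant: 1 means held, -1 means held with waiters
                 * sleeping on it, 0 means free.  The releasing thread only
                 * issues a wakeup when the value is -1.
                 */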
                while (phys_pager_alloc_lock) {
                        phys_pager_alloc_lock = -1;
                        tsleep(&phys_pager_alloc_lock, PVM, "phyalc", 0);
                }
                phys_pager_alloc_lock = 1;

                /*
                 * Look up pager, creating as necessary.
                 */
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        object = vm_object_allocate(OBJT_PHYS, pindex);
                        object->handle = handle;
                        mtx_lock(&phys_pager_mtx);
                        TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
                            pager_object_list);
                        mtx_unlock(&phys_pager_mtx);
                } else {
                        /*
                         * Gain a reference to the object.
                         */
                        vm_object_reference(object);
                        if (pindex > object->size)
                                object->size = pindex;
                }
                if (phys_pager_alloc_lock == -1)
                        wakeup(&phys_pager_alloc_lock);
                phys_pager_alloc_lock = 0;
                mtx_unlock(&Giant);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
        }

        return (object);
}

/*
 * MPSAFE
 */
static void
phys_pager_dealloc(vm_object_t object)
{

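        /*
         * Only objects created with a handle were placed on the pager
         * object list; anonymous objects never were, so there is nothing
         * to unhook for them here.
         */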
        if (object->handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
        }
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        int i;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
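        /*
         * An OBJT_PHYS object has no backing store to read from, so any
         * page that is not yet valid is simply zero-filled here and then
         * marked fully valid.
         */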
        for (i = 0; i < count; i++) {
                if (m[i]->valid == 0) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        m[i]->valid = VM_PAGE_BITS_ALL;
                }
                KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_getpages: partially valid page %p", m[i]));
        }
        vm_page_lock_queues();
        for (i = 0; i < count; i++) {
                /* Switch off pv_entries */
                vm_page_unmanage(m[i]);
                m[i]->dirty = 0;
                /* The requested page must remain busy, the others not. */
                if (reqpage != i) {
                        vm_page_flag_clear(m[i], PG_BUSY);
                        m[i]->busy = 0;
                }
        }
        vm_page_unlock_queues();
        return (VM_PAGER_OK);
}

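/*
 * Pages of an OBJT_PHYS object are never written back to any backing
 * store, so this entry point should never be reached.
 */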
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

        panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        vm_pindex_t base, end;

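        /*
         * For example, with PHYSCLUSTER == 1024, a pindex of 0x12345
         * gives base 0x12000 and end 0x123ff, so *before is reported as
         * 0x345 (837) pages and *after as 0xba (186) pages.
         */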
        base = pindex & (~(PHYSCLUSTER - 1));
        end = base + (PHYSCLUSTER - 1);
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

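/*
 * Pager operations for OBJT_PHYS objects; this vector is hooked into the
 * pager table (pagertab) in vm/vm_pager.c.
 */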
struct pagerops physpagerops = {
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
};