/* FreeBSD/Linux Kernel Cross Reference: sys/uvm/uvm_page_i.h */
1 /* $NetBSD: uvm_page_i.h,v 1.22 2004/05/12 20:09:52 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
42 * from: Id: uvm_page_i.h,v 1.1.2.7 1998/01/05 00:26:02 chuck Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_page_i.h
71 */
72
73 /*
74 * inline functions [maybe]
75 */
76
77 #if defined(UVM_PAGE_INLINE) || defined(UVM_PAGE)
78
79 #ifndef _UVM_UVM_PAGE_I_H_
80 #define _UVM_UVM_PAGE_I_H_
81
82 /*
83 * uvm_lock_fpageq: lock the free page queue
84 *
85 * => free page queue can be accessed in interrupt context, so this
86 * blocks all interrupts that can cause memory allocation, and
87 * returns the previous interrupt level.
88 */
89
90 PAGE_INLINE int
91 uvm_lock_fpageq()
92 {
93 int s;
94
95 s = splvm();
96 simple_lock(&uvm.fpageqlock);
97 return (s);
98 }
99
100 /*
101 * uvm_unlock_fpageq: unlock the free page queue
102 *
103 * => caller must supply interrupt level returned by uvm_lock_fpageq()
104 * so that it may be restored.
105 */
106
107 PAGE_INLINE void
108 uvm_unlock_fpageq(s)
109 int s;
110 {
111
112 simple_unlock(&uvm.fpageqlock);
113 splx(s);
114 }
115
116 /*
117 * uvm_pagelookup: look up a page
118 *
119 * => caller should lock object to keep someone from pulling the page
120 * out from under it
121 */
122
123 struct vm_page *
124 uvm_pagelookup(obj, off)
125 struct uvm_object *obj;
126 voff_t off;
127 {
128 struct vm_page *pg;
129 struct pglist *buck;
130
131 buck = &uvm.page_hash[uvm_pagehash(obj,off)];
132 simple_lock(&uvm.hashlock);
133 TAILQ_FOREACH(pg, buck, hashq) {
134 if (pg->uobject == obj && pg->offset == off) {
135 break;
136 }
137 }
138 simple_unlock(&uvm.hashlock);
139 KASSERT(pg == NULL || obj->uo_npages != 0);
140 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
141 (pg->flags & PG_BUSY) != 0);
142 return(pg);
143 }
144
145 /*
146 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
147 *
148 * => caller must lock page queues
149 */
150
151 PAGE_INLINE void
152 uvm_pagewire(pg)
153 struct vm_page *pg;
154 {
155 UVM_LOCK_ASSERT_PAGEQ();
156 if (pg->wire_count == 0) {
157 uvm_pagedequeue(pg);
158 uvmexp.wired++;
159 }
160 pg->wire_count++;
161 }
162
163 /*
164 * uvm_pageunwire: unwire the page.
165 *
166 * => activate if wire count goes to zero.
167 * => caller must lock page queues
168 */
169
170 PAGE_INLINE void
171 uvm_pageunwire(pg)
172 struct vm_page *pg;
173 {
174 UVM_LOCK_ASSERT_PAGEQ();
175 pg->wire_count--;
176 if (pg->wire_count == 0) {
177 TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
178 uvmexp.active++;
179 pg->pqflags |= PQ_ACTIVE;
180 uvmexp.wired--;
181 }
182 }
183
184 /*
185 * uvm_pagedeactivate: deactivate page
186 *
187 * => caller must lock page queues
188 * => caller must check to make sure page is not wired
189 * => object that page belongs to must be locked (so we can adjust pg->flags)
190 * => caller must clear the reference on the page before calling
191 */
192
193 PAGE_INLINE void
194 uvm_pagedeactivate(pg)
195 struct vm_page *pg;
196 {
197 UVM_LOCK_ASSERT_PAGEQ();
198 if (pg->pqflags & PQ_ACTIVE) {
199 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
200 pg->pqflags &= ~PQ_ACTIVE;
201 uvmexp.active--;
202 }
203 if ((pg->pqflags & PQ_INACTIVE) == 0) {
204 KASSERT(pg->wire_count == 0);
205 TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
206 pg->pqflags |= PQ_INACTIVE;
207 uvmexp.inactive++;
208 }
209 }
210
211 /*
212 * uvm_pageactivate: activate page
213 *
214 * => caller must lock page queues
215 */
216
217 PAGE_INLINE void
218 uvm_pageactivate(pg)
219 struct vm_page *pg;
220 {
221 UVM_LOCK_ASSERT_PAGEQ();
222 uvm_pagedequeue(pg);
223 if (pg->wire_count == 0) {
224 TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
225 pg->pqflags |= PQ_ACTIVE;
226 uvmexp.active++;
227 }
228 }
229
230 /*
231 * uvm_pagedequeue: remove a page from any paging queue
232 */
233
234 PAGE_INLINE void
235 uvm_pagedequeue(pg)
236 struct vm_page *pg;
237 {
238 if (pg->pqflags & PQ_ACTIVE) {
239 UVM_LOCK_ASSERT_PAGEQ();
240 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
241 pg->pqflags &= ~PQ_ACTIVE;
242 uvmexp.active--;
243 } else if (pg->pqflags & PQ_INACTIVE) {
244 UVM_LOCK_ASSERT_PAGEQ();
245 TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
246 pg->pqflags &= ~PQ_INACTIVE;
247 uvmexp.inactive--;
248 }
249 }
250
251 /*
252 * uvm_pagezero: zero fill a page
253 *
254 * => if page is part of an object then the object should be locked
255 * to protect pg->flags.
256 */
257
258 PAGE_INLINE void
259 uvm_pagezero(pg)
260 struct vm_page *pg;
261 {
262 pg->flags &= ~PG_CLEAN;
263 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
264 }
265
266 /*
267 * uvm_pagecopy: copy a page
268 *
269 * => if page is part of an object then the object should be locked
270 * to protect pg->flags.
271 */
272
273 PAGE_INLINE void
274 uvm_pagecopy(src, dst)
275 struct vm_page *src, *dst;
276 {
277
278 dst->flags &= ~PG_CLEAN;
279 pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
280 }
281
282 /*
283 * uvm_page_lookup_freelist: look up the free list for the specified page
284 */
285
286 PAGE_INLINE int
287 uvm_page_lookup_freelist(pg)
288 struct vm_page *pg;
289 {
290 int lcv;
291
292 lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
293 KASSERT(lcv != -1);
294 return (vm_physmem[lcv].free_list);
295 }
296
297 #endif /* _UVM_UVM_PAGE_I_H_ */
298
299 #endif /* defined(UVM_PAGE_INLINE) || defined(UVM_PAGE) */
/* Cache object: 47edeab04ddfce83849b199c22ea1f9e */