sys/uvm/uvm_page_i.h
/*	$NetBSD: uvm_page_i.h,v 1.21 2002/12/01 22:58:43 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page_i.h,v 1.1.2.7 1998/01/05 00:26:02 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page_i.h
 */

/*
 * inline functions [maybe]
 */

#if defined(UVM_PAGE_INLINE) || defined(UVM_PAGE)

#ifndef _UVM_UVM_PAGE_I_H_
#define _UVM_UVM_PAGE_I_H_

/*
 * uvm_lock_fpageq: lock the free page queue
 *
 * => free page queue can be accessed in interrupt context, so this
 *      blocks all interrupts that can cause memory allocation, and
 *      returns the previous interrupt level.
 */

PAGE_INLINE int
uvm_lock_fpageq()
{
        int s;

        s = splvm();
        simple_lock(&uvm.fpageqlock);
        return (s);
}

/*
 * uvm_unlock_fpageq: unlock the free page queue
 *
 * => caller must supply interrupt level returned by uvm_lock_fpageq()
 *      so that it may be restored.
 */

PAGE_INLINE void
uvm_unlock_fpageq(s)
        int s;
{

        simple_unlock(&uvm.fpageqlock);
        splx(s);
}
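
/*
 * usage sketch (illustrative addition, not in the original file): a
 * caller brackets its free-page-queue work with the lock/unlock pair
 * and threads the saved interrupt level through.  uvmexp.free and
 * uvmexp.reserve_kernel are assumed counters from struct uvmexp; the
 * work in between is hypothetical.
 *
 *      int s;
 *
 *      s = uvm_lock_fpageq();
 *      if (uvmexp.free > uvmexp.reserve_kernel) {
 *              ... pull a page off one of the free page queues ...
 *      }
 *      uvm_unlock_fpageq(s);
 */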

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *      out from under it
 */

struct vm_page *
uvm_pagelookup(obj, off)
        struct uvm_object *obj;
        voff_t off;
{
        struct vm_page *pg;
        struct pglist *buck;

        buck = &uvm.page_hash[uvm_pagehash(obj,off)];
        simple_lock(&uvm.hashlock);
        TAILQ_FOREACH(pg, buck, hashq) {
                if (pg->uobject == obj && pg->offset == off) {
                        break;
                }
        }
        simple_unlock(&uvm.hashlock);
        KASSERT(pg == NULL || obj->uo_npages != 0);
        KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
                (pg->flags & PG_BUSY) != 0);
        return(pg);
}
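
/*
 * usage sketch (illustrative addition, not in the original file): the
 * caller holds the object's vmobjlock across the lookup so the page
 * cannot be freed or re-hashed underneath it.  "uobj" and "off" are
 * hypothetical names.
 *
 *      struct vm_page *pg;
 *
 *      simple_lock(&uobj->vmobjlock);
 *      pg = uvm_pagelookup(uobj, off);
 *      if (pg != NULL) {
 *              ... pg stays valid while the object lock is held ...
 *      }
 *      simple_unlock(&uobj->vmobjlock);
 */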

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pagewire(pg)
        struct vm_page *pg;
{
        if (pg->wire_count == 0) {
                uvm_pagedequeue(pg);
                uvmexp.wired++;
        }
        pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pageunwire(pg)
        struct vm_page *pg;
{
        pg->wire_count--;
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
                uvmexp.active++;
                pg->pqflags |= PQ_ACTIVE;
                uvmexp.wired--;
        }
}
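
/*
 * usage sketch (illustrative addition, not in the original file):
 * wiring and unwiring happen under the page queue lock; the
 * uvm_lock_pageq()/uvm_unlock_pageq() macros from uvm_page.h
 * (assumed here) take and release uvm.pageqlock.
 *
 *      uvm_lock_pageq();
 *      uvm_pagewire(pg);       wire count 0 -> 1, page leaves the queues
 *      uvm_unlock_pageq();
 *      ... page is safe from the pagedaemon while wired ...
 *      uvm_lock_pageq();
 *      uvm_pageunwire(pg);     wire count 1 -> 0, page goes active again
 *      uvm_unlock_pageq();
 */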

/*
 * uvm_pagedeactivate: deactivate page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 * => caller must clear the reference on the page before calling
 */

PAGE_INLINE void
uvm_pagedeactivate(pg)
        struct vm_page *pg;
{
        if (pg->pqflags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                pg->pqflags &= ~PQ_ACTIVE;
                uvmexp.active--;
        }
        if ((pg->pqflags & PQ_INACTIVE) == 0) {
                KASSERT(pg->wire_count == 0);
                TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
                pg->pqflags |= PQ_INACTIVE;
                uvmexp.inactive++;
        }
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */

PAGE_INLINE void
uvm_pageactivate(pg)
        struct vm_page *pg;
{
        uvm_pagedequeue(pg);
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
                pg->pqflags |= PQ_ACTIVE;
                uvmexp.active++;
        }
}
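
/*
 * usage sketch (illustrative addition, not in the original file): a
 * pagedaemon-style caller moving an unwired page to the inactive
 * queue, clearing the pmap-level reference bit first as the rules
 * above require (the owning object is assumed to be locked as well).
 *
 *      uvm_lock_pageq();
 *      if (pg->wire_count == 0) {
 *              pmap_clear_reference(pg);
 *              uvm_pagedeactivate(pg);
 *      }
 *      uvm_unlock_pageq();
 */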

/*
 * uvm_pagedequeue: remove a page from any paging queue
 */

PAGE_INLINE void
uvm_pagedequeue(pg)
        struct vm_page *pg;
{
        if (pg->pqflags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                pg->pqflags &= ~PQ_ACTIVE;
                uvmexp.active--;
        } else if (pg->pqflags & PQ_INACTIVE) {
                TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
                pg->pqflags &= ~PQ_INACTIVE;
                uvmexp.inactive--;
        }
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *      to protect pg->flags.
 */

PAGE_INLINE void
uvm_pagezero(pg)
        struct vm_page *pg;
{
        pg->flags &= ~PG_CLEAN;
        pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *      to protect pg->flags.
 */

PAGE_INLINE void
uvm_pagecopy(src, dst)
        struct vm_page *src, *dst;
{

        dst->flags &= ~PG_CLEAN;
        pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}
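
/*
 * usage sketch (illustrative addition, not in the original file):
 * both calls clear PG_CLEAN on the target page, i.e. mark it dirty,
 * so the owning object's lock is held by the caller.  "srcpg" and
 * "dstpg" are hypothetical pages.
 *
 *      uvm_pagezero(dstpg);            zero-fill a fresh page
 *
 * or, for a copy-on-write style duplication:
 *
 *      uvm_pagecopy(srcpg, dstpg);     dstpg is now dirty
 */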

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */

PAGE_INLINE int
uvm_page_lookup_freelist(pg)
        struct vm_page *pg;
{
        int lcv;

        lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
        KASSERT(lcv != -1);
        return (vm_physmem[lcv].free_list);
}
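
/*
 * usage sketch (illustrative addition, not in the original file):
 * asking which free list a page belongs to, e.g. when handing it
 * back to the allocator; that uvm.page_free is indexed by this value
 * is an assumption about the allocator's layout.
 *
 *      int fl;
 *
 *      fl = uvm_page_lookup_freelist(pg);
 *      ... pg belongs on the uvm.page_free[fl] queues ...
 */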

#endif /* _UVM_UVM_PAGE_I_H_ */

#endif /* defined(UVM_PAGE_INLINE) || defined(UVM_PAGE) */