/*	$NetBSD: ufs_inode.c,v 1.42 2003/11/05 10:18:38 hannken Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_inode.c	8.9 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.42 2003/11/05 10:18:38 hannken Exp $");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <uvm/uvm.h>

extern int prtactive;		/* 1 => print out reclaim of active vnodes */

/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ufs_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *mp;
	struct proc *p = ap->a_p;
	mode_t mode;
	int error = 0;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_inactive: pushing active", vp);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_ffs_effnlink == 0 && DOINGSOFTDEP(vp))
		softdep_releasefile(ip);

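	/*
	 * The link count has dropped to zero and the file system is
	 * writable: release the quota charged to the file, truncate it
	 * to zero length and free the inode itself.
	 */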
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		vn_start_write(vp, &mp, V_WAIT | V_LOWER);
#ifdef QUOTA
		if (!getinoquota(ip))
			(void)chkiq(ip, -1, NOCRED, 0);
#endif
		if (ip->i_size != 0) {
			error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, p);
		}
		/*
		 * Setting the mode to zero needs to wait for the inode
		 * to be written just as does a change to the link count.
		 * So, rather than creating a new entry point to do the
		 * same thing, we just use softdep_change_linkcnt().
		 */
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip);
		VOP_VFREE(vp, ip->i_number, mode);
		vn_finished_write(mp, V_LOWER);
	}

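	/*
	 * If the inode has pending timestamp or on-disk updates,
	 * flush them out now.
	 */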
	if (ip->i_flag &
	    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) {
		vn_start_write(vp, &mp, V_WAIT | V_LOWER);
		VOP_UPDATE(vp, NULL, NULL, 0);
		vn_finished_write(mp, V_LOWER);
	}
out:
	VOP_UNLOCK(vp, 0);
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */

	if (ip->i_mode == 0)
		vrecycle(vp, NULL, p);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ufs_reclaim(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct inode *ip;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_reclaim: pushing active", vp);
	/*
	 * Remove the inode from its hash chain.
	 */
	ip = VTOI(vp);
	ufs_ihashrem(ip);
	/*
	 * Purge old data structures associated with the inode.
	 */
	cache_purge(vp);
	if (ip->i_devvp) {
		vrele(ip->i_devvp);
		ip->i_devvp = 0;
	}
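	/*
	 * Release any quota references still held by the inode.
	 */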
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ip->i_dquot[i] != NODQUOT) {
				dqrele(vp, ip->i_dquot[i]);
				ip->i_dquot[i] = NODQUOT;
			}
		}
	}
#endif
	return (0);
}

/*
 * allocate a range of blocks in a file.
 * after this function returns, any page entirely contained within the range
 * will map to invalid data and thus must be overwritten before it is made
 * accessible to others.
 */

int
ufs_balloc_range(vp, off, len, cred, flags)
	struct vnode *vp;
	off_t off, len;
	struct ucred *cred;
	int flags;
{
	off_t oldeof, neweof, oldeob, oldeop, neweob, pagestart;
	struct uvm_object *uobj;
	struct genfs_node *gp = VTOG(vp);
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page *pgs[ppb];
	UVMHIST_FUNC("ufs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
	    vp, off, len, vp->v_size);

	oldeof = vp->v_size;
	GOP_SIZE(vp, oldeof, &oldeop, GOP_SIZE_WRITE);
	GOP_SIZE(vp, oldeof, &oldeob, GOP_SIZE_READ);

	/*
	 * If we need to map pages in the former last block,
	 * do so now.
	 */
	if (oldeob != oldeop) {
		uvm_vnp_zerorange(vp, oldeop, oldeob - oldeop);
	}

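	/*
	 * Compute the new EOF and the end of its backing store
	 * after the allocation.
	 */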
	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, GOP_SIZE_WRITE);

	error = 0;
	uobj = &vp->v_uobj;
	pgs[0] = NULL;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

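	/*
	 * pagestart is the start of the fs block containing "off";
	 * npages covers from there up to the new end of backing store,
	 * but never more than one block's worth of pages.
	 */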
	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	memset(pgs, 0, npages * sizeof(struct vm_page *));
	simple_lock(&uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_READ, 0, PGO_SYNCIO|PGO_PASTEOF);
	if (error) {
		return error;
	}
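	/*
	 * VOP_GETPAGES() drops the object lock before returning,
	 * so take it again before touching the pages.
	 */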
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		UVMHIST_LOG(ubchist, "got pgs[%d] %p", i, pgs[i],0,0);
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		pgs[i]->flags &= ~PG_CLEAN;
		uvm_pageactivate(pgs[i]);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	/*
	 * now allocate the range.
	 */

	lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	error = GOP_ALLOC(vp, off, len, flags, cred);
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);

	/*
	 * clear PG_RDONLY on any pages we are holding
	 * (since they now have backing store) and unbusy them.
	 * if the allocation failed, also mark the pages PG_RELEASED
	 * so that unbusying them will free them.
	 */

	simple_lock(&uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		pgs[i]->flags &= ~PG_RDONLY;
		if (error) {
			pgs[i]->flags |= PG_RELEASED;
		}
	}
	if (error) {
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
	} else {
		uvm_page_unbusy(pgs, npages);
	}
	simple_unlock(&uobj->vmobjlock);
	return error;
}