1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)ufs_bmap.c 8.7 (Berkeley) 3/21/95
37 * $FreeBSD$
38 */
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bio.h>
43 #include <sys/buf.h>
44 #include <sys/proc.h>
45 #include <sys/vnode.h>
46 #include <sys/mount.h>
47 #include <sys/racct.h>
48 #include <sys/resourcevar.h>
49 #include <sys/stat.h>
50
51 #include <fs/ext2fs/fs.h>
52 #include <fs/ext2fs/inode.h>
53 #include <fs/ext2fs/ext2fs.h>
54 #include <fs/ext2fs/ext2_dinode.h>
55 #include <fs/ext2fs/ext2_extern.h>
56 #include <fs/ext2fs/ext2_mount.h>
57
58 /*
59 * Bmap converts the logical block number of a file to its physical block
60 * number on the disk. The conversion is done by using the logical block
61 * number to index into the array of block pointers described by the dinode.
62 */
63 int
64 ext2_bmap(struct vop_bmap_args *ap)
65 {
66 daddr_t blkno;
67 int error;
68
69 /*
70 * Check for underlying vnode requests and ensure that logical
71 * to physical mapping is requested.
72 */
73 if (ap->a_bop != NULL)
74 *ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj;
75 if (ap->a_bnp == NULL)
76 return (0);
77
78 if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS)
79 error = ext4_bmapext(ap->a_vp, ap->a_bn, &blkno,
80 ap->a_runp, ap->a_runb);
81 else
82 error = ext2_bmaparray(ap->a_vp, ap->a_bn, &blkno,
83 ap->a_runp, ap->a_runb);
84 *ap->a_bnp = blkno;
85 return (error);
86 }
87
88 /*
89 * Convert the logical block number of a file to its physical block number
90 * on the disk within ext4 extents.
91 */
92 int
93 ext4_bmapext(struct vnode *vp, int32_t bn, int64_t *bnp, int *runp, int *runb)
94 {
95 struct inode *ip;
96 struct m_ext2fs *fs;
97 struct mount *mp;
98 struct ext2mount *ump;
99 struct ext4_extent_header *ehp;
100 struct ext4_extent *ep;
101 struct ext4_extent_path *path = NULL;
102 daddr_t lbn;
103 int error, depth, maxrun = 0, bsize;
104
105 ip = VTOI(vp);
106 fs = ip->i_e2fs;
107 mp = vp->v_mount;
108 ump = VFSTOEXT2(mp);
109 lbn = bn;
110 ehp = (struct ext4_extent_header *)ip->i_data;
111 depth = ehp->eh_depth;
112 bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);
113
114 *bnp = -1;
115 if (runp != NULL) {
116 maxrun = mp->mnt_iosize_max / bsize - 1;
117 *runp = 0;
118 }
119 if (runb != NULL)
120 *runb = 0;
121
122 error = ext4_ext_find_extent(ip, lbn, &path);
123 if (error)
124 return (error);
125
126 ep = path[depth].ep_ext;
127 if(ep) {
128 if (lbn < ep->e_blk) {
129 if (runp != NULL) {
130 *runp = min(maxrun, ep->e_blk - lbn - 1);
131 }
132 } else if (ep->e_blk <= lbn && lbn < ep->e_blk + ep->e_len) {
133 *bnp = fsbtodb(fs, lbn - ep->e_blk +
134 (ep->e_start_lo | (daddr_t)ep->e_start_hi << 32));
135 if (runp != NULL) {
136 *runp = min(maxrun,
137 ep->e_len - (lbn - ep->e_blk) - 1);
138 }
139 if (runb != NULL)
140 *runb = min(maxrun, lbn - ep->e_blk);
141 } else {
142 if (runb != NULL)
143 *runb = min(maxrun, ep->e_blk + lbn - ep->e_len);
144 }
145 }
146
147 ext4_ext_path_free(path);
148
149 return (error);
150 }
151
/*
 * Read the indirect block with logical block number "lbn" and on-disk
 * address "daddr" into the buffer cache, returning the busy buffer in
 * *bpp on success.  The caller must bqrelse() *bpp when done with it.
 * Returns 0 or an errno from the read.
 */
static int
readindir(struct vnode *vp, e2fs_lbn_t lbn, e2fs_daddr_t daddr, struct buf **bpp)
{
	struct buf *bp;
	struct mount *mp;
	struct ext2mount *ump;
	int error;

	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);

	bp = getblk(vp, lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/*
		 * Not cached: the caller may only pass daddr == 0 when it
		 * expects the indirect block to already be in the cache.
		 */
		KASSERT(daddr != 0,
		    ("readindir: indirect block not in cache"));

		/* Set up the buffer for a device read and issue it. */
		bp->b_blkno = blkptrtodb(ump, daddr);
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
#ifdef RACCT
		/* Charge the block read to the current process. */
		if (racct_enable) {
			PROC_LOCK(curproc);
			racct_add_buf(curproc, bp, 0);
			PROC_UNLOCK(curproc);
		}
#endif
		curthread->td_ru.ru_inblock++;
		/* Wait for the read to complete before handing bp back. */
		error = bufwait(bp);
		if (error != 0) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}
192
193 /*
194 * Indirect blocks are now on the vnode for the file. They are given negative
195 * logical block numbers. Indirect blocks are addressed by the negative
196 * address of the first data block to which they point. Double indirect blocks
197 * are addressed by one less than the address of the first indirect block to
198 * which they point. Triple indirect blocks are addressed by one less than
199 * the address of the first double indirect block to which they point.
200 *
201 * ext2_bmaparray does the bmap conversion, and if requested returns the
202 * array of logical blocks which must be traversed to get to a block.
203 * Each entry contains the offset into that block that gets you to the
204 * next block and the disk address of the block (if it is assigned).
205 */
206
/*
 * vp   - vnode of the file
 * bn   - logical block number to translate
 * bnp  - receives the device block number, or -1 for a hole
 * runp - if non-NULL, receives the number of contiguous blocks after bn
 * runb - if non-NULL, receives the number of contiguous blocks before bn
 */
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2mount *ump;
	struct mount *mp;
	struct indir a[EXT2_NIADDR + 1], *ap;
	daddr_t daddr;
	e2fs_lbn_t metalbn;
	int error, num, maxrun = 0, bsize;
	int *nump;

	ap = NULL;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);

	bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

	/* Cap run lengths to what a single maximal I/O can cover. */
	if (runp) {
		maxrun = mp->mnt_iosize_max / bsize - 1;
		*runp = 0;
	}
	if (runb)
		*runb = 0;


	/* Build the chain of indirect blocks leading to bn. */
	ap = a;
	nump = &num;
	error = ext2_getlbns(vp, bn, ap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		/*
		 * Direct block: translate straight from the inode's i_db
		 * array, then extend the forward/backward runs over any
		 * physically sequential neighbors.
		 */
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0) {
			/* Unallocated (hole): report -1. */
			*bnp = -1;
		} else if (runp) {
			daddr_t bnb = bn;

			for (++bn; bn < EXT2_NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn + 1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}

	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[ap->in_off];

	/* Walk down the indirect chain, one level per iteration. */
	for (bp = NULL, ++ap; --num; ++ap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = ap->in_lbn;
		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);
		error = readindir(vp, metalbn, daddr, &bp);
		if (error != 0)
			return (error);

		/* Pick up the next-level pointer from the indirect block. */
		daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off];
		if (num == 1 && daddr && runp) {
			/*
			 * Last level: extend the runs over sequential
			 * entries within this indirect block.
			 */
			for (bn = ap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((e2fs_daddr_t *)bp->b_data)[bn - 1],
			    ((e2fs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = ap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump,
					((e2fs_daddr_t *)bp->b_data)[bn],
					((e2fs_daddr_t *)bp->b_data)[bn + 1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 && daddr < ump->um_seqinc) {
		*bnp = -1;
		return (0);
	}
	*bnp = blkptrtodb(ump, daddr);
	if (*bnp == 0) {
		/* Unallocated (hole): report -1. */
		*bnp = -1;
	}
	return (0);
}
324
325 static e2fs_lbn_t
326 lbn_count(struct ext2mount *ump, int level)
327
328 {
329 e2fs_lbn_t blockcnt;
330
331 for (blockcnt = 1; level > 0; level--)
332 blockcnt *= MNINDIR(ump);
333 return (blockcnt);
334 }
335
/*
 * Find the first allocated (data) block at or after the byte offset *offp
 * (SEEK_DATA support).  On success, *offp is advanced to the start of the
 * block found.  Returns ENXIO when no data exists at or beyond the offset,
 * EINVAL for non-regular files and snapshots, or an I/O error.
 */
int
ext2_bmap_seekdata(struct vnode *vp, off_t *offp)
{
	struct buf *bp;
	struct indir a[EXT2_NIADDR + 1], *ap;
	struct inode *ip;
	struct mount *mp;
	struct ext2mount *ump;
	e2fs_daddr_t bn, daddr, nextbn;
	uint64_t bsize;
	off_t numblks;
	int error, num, num1, off;

	bp = NULL;
	error = 0;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);

	/* SEEK_DATA only applies to regular, non-snapshot files. */
	if (vp->v_type != VREG || (ip->i_flags & SF_SNAPSHOT) != 0)
		return (EINVAL);
	if (*offp < 0 || *offp >= ip->i_size)
		return (ENXIO);

	/*
	 * Walk the file's logical blocks from the one containing *offp,
	 * skipping holes until an allocated block is found.  nextbn is the
	 * logical block at which the next iteration resumes.
	 */
	bsize = mp->mnt_stat.f_iosize;
	for (bn = *offp / bsize, numblks = howmany(ip->i_size, bsize);
	    bn < numblks; bn = nextbn) {
		if (bn < EXT2_NDADDR) {
			/* Direct blocks are checked right in the inode. */
			daddr = ip->i_db[bn];
			if (daddr != 0)
				break;
			nextbn = bn + 1;
			continue;
		}

		/* Compute the indirect-block path leading to bn. */
		ap = a;
		error = ext2_getlbns(vp, bn, ap, &num);
		if (error != 0)
			break;
		MPASS(num >= 2);
		daddr = ip->i_ib[ap->in_off];
		ap++, num--;
		/*
		 * Start nextbn at the first logical block mapped by the
		 * top-level indirect block on the path.
		 */
		for (nextbn = EXT2_NDADDR, num1 = num - 1; num1 > 0; num1--)
			nextbn += lbn_count(ump, num1);
		if (daddr == 0) {
			/* The entire indirect subtree is a hole; skip it. */
			nextbn += lbn_count(ump, num);
			continue;
		}

		/* Descend the indirect chain toward a data block. */
		for (; daddr != 0 && num > 0; ap++, num--) {
			if (bp != NULL)
				bqrelse(bp);
			error = readindir(vp, ap->in_lbn, daddr, &bp);
			if (error != 0)
				return (error);

			/*
			 * Scan the indirect block until we find a non-zero
			 * pointer.
			 */
			off = ap->in_off;
			do {
				daddr = ((e2fs_daddr_t *)bp->b_data)[off];
			} while (daddr == 0 && ++off < MNINDIR(ump));
			nextbn += off * lbn_count(ump, num - 1);

			/*
			 * We need to recompute the LBNs of indirect
			 * blocks, so restart with the updated block offset.
			 */
			if (off != ap->in_off)
				break;
		}
		if (num == 0) {
			/*
			 * We found a data block.
			 */
			bn = nextbn;
			break;
		}
	}
	if (bp != NULL)
		bqrelse(bp);
	if (bn >= numblks)
		error = ENXIO;
	if (error == 0 && *offp < bn * bsize)
		*offp = bn * bsize;
	return (error);
}
425
426 /*
427 * Create an array of logical block number/offset pairs which represent the
428 * path of indirect blocks required to access a data block. The first "pair"
429 * contains the logical block number of the appropriate single, double or
430 * triple indirect block and the offset into the inode indirect block array.
431 * Note, the logical block number of the inode single/double/triple indirect
432 * block appears twice in the array, once with the offset into the i_ib and
433 * once with the offset into the page itself.
434 */
/*
 * vp   - vnode of the file
 * bn   - logical block number (negative values address indirect blocks)
 * ap   - array of at least EXT2_NIADDR + 1 entries to fill with the path
 * nump - if non-NULL, receives the number of path entries filled in
 *        (0 means bn is a direct block and the path is empty)
 */
int
ext2_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump)
{
	long blockcnt;
	e2fs_lbn_t metalbn, realbn;
	struct ext2mount *ump;
	int i, numlevels, off;
	int64_t qblockcnt;

	ump = VFSTOEXT2(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	/* Work with the magnitude; realbn keeps the sign for later tests. */
	if ((long)bn < 0)
		bn = -(long)bn;

	/* The first EXT2_NDADDR blocks are direct blocks. */
	if (bn < EXT2_NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection. After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and EXT2_NIADDR - i is the
	 * number of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = EXT2_NIADDR, bn -= EXT2_NDADDR; ;
	    i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		/*
		 * Use int64_t's here to avoid overflow for triple indirect
		 * blocks when longs have 32 bits and the block size is more
		 * than 4K.
		 */
		qblockcnt = (int64_t)blockcnt * MNINDIR(ump);
		if (bn < qblockcnt)
			break;
		blockcnt = qblockcnt;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + EXT2_NIADDR - i);
	else
		metalbn = -(-realbn - bn + EXT2_NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = EXT2_NIADDR - i;
	ap++;
	for (++numlevels; i <= EXT2_NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		++ap;

		/*
		 * Advance metalbn to the next-level indirect block: one
		 * past (toward zero) the first block it maps.
		 */
		metalbn -= -1 + off * blockcnt;
		blockcnt /= MNINDIR(ump);
	}
	if (nump)
		*nump = numlevels;
	return (0);
}