xref: /freebsd/sys/fs/ext2fs/ext2_extents.c (revision be60d8f2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
37 #include <sys/bio.h>
38 #include <sys/buf.h>
39 #include <sys/endian.h>
40 #include <sys/conf.h>
41 #include <sys/sdt.h>
42 #include <sys/stat.h>
43 
44 #include <fs/ext2fs/ext2_mount.h>
45 #include <fs/ext2fs/fs.h>
46 #include <fs/ext2fs/inode.h>
47 #include <fs/ext2fs/ext2fs.h>
48 #include <fs/ext2fs/ext2_extents.h>
49 #include <fs/ext2fs/ext2_extern.h>
50 
51 SDT_PROVIDER_DECLARE(ext2fs);
52 /*
53  * ext2fs trace probe:
54  * arg0: verbosity. Higher numbers give more verbose messages
55  * arg1: Textual message
56  */
57 SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*");
58 
59 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
60 
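/*
 * The routines below are compiled in only when the kernel is built with
 * EXT2FS_PRINT_EXTENTS.  They walk an inode's on-disk extent tree and
 * print every index and extent record; they are purely diagnostic and
 * have no effect on normal operation.
 */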
61 #ifdef EXT2FS_PRINT_EXTENTS
62 static const bool print_extents_walk = true;
63 
64 static int ext4_ext_check_header(struct inode *, struct ext4_extent_header *);
65 static int ext4_ext_walk_header(struct inode *, struct ext4_extent_header *);
66 static inline e4fs_daddr_t ext4_ext_index_pblock(struct ext4_extent_index *);
67 static inline e4fs_daddr_t ext4_ext_extent_pblock(struct ext4_extent *);
68 
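/*
 * Sanity-check that a physical block number lies within the filesystem:
 * at or after the first data block and below the total block count.
 */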
69 static int
70 ext4_ext_blk_check(struct inode *ip, e4fs_daddr_t blk)
71 {
72 	struct m_ext2fs *fs;
73 
74 	fs = ip->i_e2fs;
75 
76 	if (blk < le32toh(fs->e2fs->e2fs_first_dblock) || blk >= fs->e2fs_bcount)
77 		return (EIO);
78 
79 	return (0);
80 }
81 
82 static int
83 ext4_ext_walk_index(struct inode *ip, struct ext4_extent_index *ex, bool do_walk)
84 {
85 	struct m_ext2fs *fs;
86 	struct buf *bp;
87 	e4fs_daddr_t blk;
88 	int error;
89 
90 	fs = ip->i_e2fs;
91 
92 	if (print_extents_walk)
93 		printf("    index %p => (blk %u pblk %ju)\n", ex,
94 		    le32toh(ex->ei_blk), (uint64_t)le16toh(ex->ei_leaf_hi) << 32 |
95 		    le32toh(ex->ei_leaf_lo));
96 
97 	if (!do_walk)
98 		return (0);
99 
100 	blk = ext4_ext_index_pblock(ex);
101 	error = ext4_ext_blk_check(ip, blk);
102 	if (error)
103 		return (error);
104 
105 	if ((error = bread(ip->i_devvp,
106 	    fsbtodb(fs, blk), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
107 		brelse(bp);
108 		return (error);
109 	}
110 
111 	error = ext4_ext_walk_header(ip, (struct ext4_extent_header *)bp->b_data);
112 
113 	brelse(bp);
114 
115 	return (error);
116 }
117 
118 static int
119 ext4_ext_walk_extent(struct inode *ip, struct ext4_extent *ep)
120 {
121 	e4fs_daddr_t blk;
122 	int error;
123 
124 	blk = ext4_ext_extent_pblock(ep);
125 	error = ext4_ext_blk_check(ip, blk);
126 	if (error)
127 		return (error);
128 
129 	if (print_extents_walk)
130 		printf("    ext %p => (blk %u len %u start %ju)\n",
131 		    ep, le32toh(ep->e_blk), le16toh(ep->e_len),
132 		    (uint64_t)blk);
133 
134 	return (0);
135 }
136 
137 static int
138 ext4_ext_walk_header(struct inode *ip, struct ext4_extent_header *eh)
139 {
140 	int i, error = 0;
141 
142 	error = ext4_ext_check_header(ip, eh);
143 	if (error)
144 		return (error);
145 
146 	if (print_extents_walk)
147 		printf("header %p => (entries %d max %d depth %d gen %d)\n",
148 		    eh, le16toh(eh->eh_ecount),
149 		    le16toh(eh->eh_max), le16toh(eh->eh_depth), le32toh(eh->eh_gen));
150 
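	/*
	 * The records following the header are index entries in interior
	 * nodes and extents in leaves.  Stepping with (eh + 1 + i) works
	 * only because the header, index and extent records are all 12
	 * bytes on disk, so header-sized pointer arithmetic lands exactly
	 * on each record.
	 */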
151 	for (i = 0; i < le16toh(eh->eh_ecount) && error == 0; i++)
152 		if (eh->eh_depth != 0)
153 			error = ext4_ext_walk_index(ip,
154 			    (struct ext4_extent_index *)(eh + 1 + i), true);
155 		else
156 			error = ext4_ext_walk_extent(ip, (struct ext4_extent *)(eh + 1 + i));
157 
158 	return (error);
159 }
160 
161 static int
162 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
163 {
164 	int k, l, error = 0;
165 
166 	l = path->ep_depth;
167 
168 	if (print_extents_walk)
169 		printf("ip=%ju, Path:\n", ip->i_number);
170 
171 	for (k = 0; k <= l && error == 0; k++, path++) {
172 		if (path->ep_index) {
173 			error = ext4_ext_walk_index(ip, path->ep_index, false);
174 		} else if (path->ep_ext) {
175 			error = ext4_ext_walk_extent(ip, path->ep_ext);
176 		}
177 	}
178 
179 	return (error);
180 }
181 
182 int
183 ext4_ext_walk(struct inode *ip)
184 {
185 	struct ext4_extent_header *ehp;
186 
187 	ehp = (struct ext4_extent_header *)ip->i_db;
188 
189 	if (print_extents_walk)
190 		printf("Extent status: ip=%ju\n", ip->i_number);
191 
192 	if (!(ip->i_flag & IN_E4EXTENTS))
193 		return (0);
194 
195 	return (ext4_ext_walk_header(ip, ehp));
196 }
197 #endif
198 
199 static inline struct ext4_extent_header *
200 ext4_ext_inode_header(struct inode *ip)
201 {
202 
203 	return ((struct ext4_extent_header *)ip->i_db);
204 }
205 
206 static inline struct ext4_extent_header *
207 ext4_ext_block_header(char *bdata)
208 {
209 
210 	return ((struct ext4_extent_header *)bdata);
211 }
212 
213 static inline unsigned short
214 ext4_ext_inode_depth(struct inode *ip)
215 {
216 	struct ext4_extent_header *ehp;
217 
218 	ehp = (struct ext4_extent_header *)ip->i_data;
219 	return (le16toh(ehp->eh_depth));
220 }
221 
222 static inline e4fs_daddr_t
223 ext4_ext_index_pblock(struct ext4_extent_index *index)
224 {
225 	e4fs_daddr_t blk;
226 
227 	blk = le32toh(index->ei_leaf_lo);
228 	blk |= (e4fs_daddr_t)le16toh(index->ei_leaf_hi) << 32;
229 
230 	return (blk);
231 }
232 
233 static inline void
234 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
235 {
236 
237 	index->ei_leaf_lo = htole32(pb & 0xffffffff);
238 	index->ei_leaf_hi = htole16((pb >> 32) & 0xffff);
239 }
240 
241 static inline e4fs_daddr_t
242 ext4_ext_extent_pblock(struct ext4_extent *extent)
243 {
244 	e4fs_daddr_t blk;
245 
246 	blk = le32toh(extent->e_start_lo);
247 	blk |= (e4fs_daddr_t)le16toh(extent->e_start_hi) << 32;
248 
249 	return (blk);
250 }
251 
252 static inline void
253 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
254 {
255 
256 	ex->e_start_lo = htole32(pb & 0xffffffff);
257 	ex->e_start_hi = htole16((pb >> 32) & 0xffff);
258 }
259 
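/*
 * Look up a logical block in the inode's one-entry extent cache.  If lbn
 * falls inside the cached range, fill *ep with an equivalent on-disk
 * extent and return the cached type; otherwise return EXT4_EXT_CACHE_NO.
 */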
260 int
261 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
262 {
263 	struct ext4_extent_cache *ecp;
264 	int ret = EXT4_EXT_CACHE_NO;
265 
266 	ecp = &ip->i_ext_cache;
267 	if (ecp->ec_type == EXT4_EXT_CACHE_NO)
268 		return (ret);
269 
270 	if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
271 		ep->e_blk = htole32(ecp->ec_blk);
272 		ep->e_start_lo = htole32(ecp->ec_start & 0xffffffff);
273 		ep->e_start_hi = htole16(ecp->ec_start >> 32 & 0xffff);
274 		ep->e_len = htole16(ecp->ec_len);
275 		ret = ecp->ec_type;
276 	}
277 	return (ret);
278 }
279 
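/*
 * Validate an extent header read from the inode or from a tree block:
 * magic number, entry count against the maximum, and tree depth.  Any
 * corruption is reported through the trace probe and turned into EIO.
 */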
280 static int
281 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
282 {
283 	struct m_ext2fs *fs;
284 	char *error_msg;
285 
286 	fs = ip->i_e2fs;
287 
288 	if (le16toh(eh->eh_magic) != EXT4_EXT_MAGIC) {
289 		error_msg = "header: invalid magic";
290 		goto corrupted;
291 	}
292 	if (eh->eh_max == 0) {
293 		error_msg = "header: invalid eh_max";
294 		goto corrupted;
295 	}
296 	if (le16toh(eh->eh_ecount) > le16toh(eh->eh_max)) {
297 		error_msg = "header: invalid eh_entries";
298 		goto corrupted;
299 	}
300 	if (le16toh(eh->eh_depth) > 5) {
301 		error_msg = "header: invalid eh_depth";
302 		goto corrupted;
303 	}
304 
305 	return (0);
306 
307 corrupted:
308 	SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg);
309 	return (EIO);
310 }
311 
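/*
 * Binary-search the index records of an interior node for the last entry
 * whose starting logical block does not exceed blk and remember it in
 * path->ep_index; ext4_ext_binsearch_ext() below does the same for the
 * extent records of a leaf.
 */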
312 static void
313 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
314 {
315 	struct ext4_extent_header *eh;
316 	struct ext4_extent_index *r, *l, *m;
317 
318 	eh = path->ep_header;
319 
320 	KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max) &&
321 	    le16toh(eh->eh_ecount) > 0,
322 	    ("ext4_ext_binsearch_index: bad args"));
323 
324 	l = EXT_FIRST_INDEX(eh) + 1;
325 	r = EXT_FIRST_INDEX(eh) + le16toh(eh->eh_ecount) - 1;
326 	while (l <= r) {
327 		m = l + (r - l) / 2;
328 		if (blk < le32toh(m->ei_blk))
329 			r = m - 1;
330 		else
331 			l = m + 1;
332 	}
333 
334 	path->ep_index = l - 1;
335 }
336 
337 static void
338 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
339 {
340 	struct ext4_extent_header *eh;
341 	struct ext4_extent *r, *l, *m;
342 
343 	eh = path->ep_header;
344 
345 	KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max),
346 	    ("ext4_ext_binsearch_ext: bad args"));
347 
348 	if (eh->eh_ecount == 0)
349 		return;
350 
351 	l = EXT_FIRST_EXTENT(eh) + 1;
352 	r = EXT_FIRST_EXTENT(eh) + le16toh(eh->eh_ecount) - 1;
353 
354 	while (l <= r) {
355 		m = l + (r - l) / 2;
356 		if (blk < le32toh(m->e_blk))
357 			r = m - 1;
358 		else
359 			l = m + 1;
360 	}
361 
362 	path->ep_ext = l - 1;
363 }
364 
365 static int
366 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
367     struct buf *bp, uint64_t blk)
368 {
369 
370 	KASSERT(path->ep_data == NULL,
371 	    ("ext4_ext_fill_path_bdata: bad ep_data"));
372 
373 	path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
374 	memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
375 	path->ep_blk = blk;
376 
377 	return (0);
378 }
379 
380 static void
381 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
382 {
383 
384 	KASSERT(path->ep_data != NULL,
385 	    ("ext4_ext_fill_path_buf: bad ep_data"));
386 
387 	memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
388 }
389 
390 static void
391 ext4_ext_drop_refs(struct ext4_extent_path *path)
392 {
393 	int depth, i;
394 
395 	if (!path)
396 		return;
397 
398 	depth = path->ep_depth;
399 	for (i = 0; i <= depth; i++, path++)
400 		if (path->ep_data) {
401 			free(path->ep_data, M_EXT2EXTENTS);
402 			path->ep_data = NULL;
403 		}
404 }
405 
406 void
407 ext4_ext_path_free(struct ext4_extent_path *path)
408 {
409 
410 	if (!path)
411 		return;
412 
413 	ext4_ext_drop_refs(path);
414 	free(path, M_EXT2EXTENTS);
415 }
416 
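/*
 * Walk the extent tree from the root stored in the inode down to the leaf
 * that should contain the given logical block, filling in one
 * ext4_extent_path entry per level.  Interior blocks are read from disk,
 * copied into ep_data and verified (header checks and checksum) on the
 * way down.
 */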
417 int
418 ext4_ext_find_extent(struct inode *ip, daddr_t block,
419     struct ext4_extent_path **ppath)
420 {
421 	struct m_ext2fs *fs;
422 	struct ext4_extent_header *eh;
423 	struct ext4_extent_path *path;
424 	struct buf *bp;
425 	uint64_t blk;
426 	int error, depth, i, ppos, alloc;
427 
428 	fs = ip->i_e2fs;
429 	eh = ext4_ext_inode_header(ip);
430 	depth = ext4_ext_inode_depth(ip);
431 	ppos = 0;
432 	alloc = 0;
433 
434 	error = ext4_ext_check_header(ip, eh);
435 	if (error)
436 		return (error);
437 
438 	if (ppath == NULL)
439 		return (EINVAL);
440 
441 	path = *ppath;
442 	if (path == NULL) {
443 		path = malloc(EXT4_EXT_DEPTH_MAX *
444 		    sizeof(struct ext4_extent_path),
445 		    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
446 		*ppath = path;
447 		alloc = 1;
448 	}
449 
450 	path[0].ep_header = eh;
451 	path[0].ep_data = NULL;
452 
453 	/* Walk through the tree. */
454 	i = depth;
455 	while (i) {
456 		ext4_ext_binsearch_index(&path[ppos], block);
457 		blk = ext4_ext_index_pblock(path[ppos].ep_index);
458 		path[ppos].ep_depth = i;
459 		path[ppos].ep_ext = NULL;
460 
461 		error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
462 		    ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
463 		if (error) {
464 			goto error;
465 		}
466 
467 		ppos++;
468 		if (ppos > depth) {
469 			SDT_PROBE2(ext2fs, , trace, extents, 1,
470 			    "ppos > depth => extent corrupted");
471 			error = EIO;
472 			brelse(bp);
473 			goto error;
474 		}
475 
476 		ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
477 		bqrelse(bp);
478 
479 		eh = ext4_ext_block_header(path[ppos].ep_data);
480 		if (ext4_ext_check_header(ip, eh) ||
481 		    ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
482 			error = EIO;
483 			goto error;
484 		}
485 
486 		path[ppos].ep_header = eh;
487 
488 		i--;
489 	}
490 
491 	error = ext4_ext_check_header(ip, eh);
492 	if (error)
493 		goto error;
494 
495 	/* Find extent. */
496 	path[ppos].ep_depth = i;
497 	path[ppos].ep_header = eh;
498 	path[ppos].ep_ext = NULL;
499 	path[ppos].ep_index = NULL;
500 	ext4_ext_binsearch_ext(&path[ppos], block);
501 	return (0);
502 
503 error:
504 	ext4_ext_drop_refs(path);
505 	if (alloc)
506 		free(path, M_EXT2EXTENTS);
507 
508 	*ppath = NULL;
509 
510 	return (error);
511 }
512 
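/*
 * Capacity helpers: how many extent records fit in the in-inode root
 * (i_data minus the header) and how many extent or index records fit in
 * a full filesystem block.
 */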
513 static inline int
514 ext4_ext_space_root(struct inode *ip)
515 {
516 	int size;
517 
518 	size = sizeof(ip->i_data);
519 	size -= sizeof(struct ext4_extent_header);
520 	size /= sizeof(struct ext4_extent);
521 
522 	return (size);
523 }
524 
525 static inline int
526 ext4_ext_space_block(struct inode *ip)
527 {
528 	struct m_ext2fs *fs;
529 	int size;
530 
531 	fs = ip->i_e2fs;
532 
533 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
534 	    sizeof(struct ext4_extent);
535 
536 	return (size);
537 }
538 
539 static inline int
540 ext4_ext_space_block_index(struct inode *ip)
541 {
542 	struct m_ext2fs *fs;
543 	int size;
544 
545 	fs = ip->i_e2fs;
546 
547 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
548 	    sizeof(struct ext4_extent_index);
549 
550 	return (size);
551 }
552 
553 void
554 ext4_ext_tree_init(struct inode *ip)
555 {
556 	struct ext4_extent_header *ehp;
557 
558 	ip->i_flag |= IN_E4EXTENTS;
559 
560 	memset(ip->i_data, 0, sizeof(ip->i_data));
561 	ehp = (struct ext4_extent_header *)ip->i_data;
562 	ehp->eh_magic = htole16(EXT4_EXT_MAGIC);
563 	ehp->eh_max = htole16(ext4_ext_space_root(ip));
564 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
565 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
566 	ext2_update(ip->i_vnode, 1);
567 }
568 
569 static inline void
570 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
571 			uint32_t len, uint32_t start, int type)
572 {
573 
574 	KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
575 
576 	ip->i_ext_cache.ec_type = type;
577 	ip->i_ext_cache.ec_blk = blk;
578 	ip->i_ext_cache.ec_len = len;
579 	ip->i_ext_cache.ec_start = start;
580 }
581 
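/*
 * Pick a preferred physical block for a new allocation: near the extent
 * recorded in the path if there is one, otherwise near the block holding
 * the leaf itself, and as a last resort at the start of the inode's
 * block group.
 */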
582 static e4fs_daddr_t
583 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
584     e4fs_daddr_t block)
585 {
586 	struct m_ext2fs *fs;
587 	struct ext4_extent *ex;
588 	e4fs_daddr_t bg_start;
589 	int depth;
590 
591 	fs = ip->i_e2fs;
592 
593 	if (path) {
594 		depth = path->ep_depth;
595 		ex = path[depth].ep_ext;
596 		if (ex) {
597 			e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
598 			e2fs_daddr_t blk = le32toh(ex->e_blk);
599 
600 			if (block > blk)
601 				return (pblk + (block - blk));
602 			else
603 				return (pblk - (blk - block));
604 		}
605 
606 		/* Try to get block from index itself. */
607 		if (path[depth].ep_data)
608 			return (path[depth].ep_blk);
609 	}
610 
611 	/* Use inode's group. */
612 	bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
613 	    le32toh(fs->e2fs->e2fs_first_dblock);
614 
615 	return (bg_start + block);
616 }
617 
618 static int inline
619 ext4_can_extents_be_merged(struct ext4_extent *ex1,
620     struct ext4_extent *ex2)
621 {
622 
623 	if (le32toh(ex1->e_blk) + le16toh(ex1->e_len) != le32toh(ex2->e_blk))
624 		return (0);
625 
626 	if (le16toh(ex1->e_len) + le16toh(ex2->e_len) > EXT4_MAX_LEN)
627 		return (0);
628 
629 	if (ext4_ext_extent_pblock(ex1) + le16toh(ex1->e_len) ==
630 	    ext4_ext_extent_pblock(ex2))
631 		return (1);
632 
633 	return (0);
634 }
635 
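/*
 * Return the logical block at which the next leaf begins, found by walking
 * up the path to the nearest level that still has an index entry to the
 * right, or EXT4_MAX_BLOCKS when this is the last leaf.
 */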
636 static unsigned
637 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
638 {
639 	int depth = path->ep_depth;
640 
641 	/* Empty tree */
642 	if (depth == 0)
643 		return (EXT4_MAX_BLOCKS);
644 
645 	/* Go to indexes. */
646 	depth--;
647 
648 	while (depth >= 0) {
649 		if (path[depth].ep_index !=
650 		    EXT_LAST_INDEX(path[depth].ep_header))
651 			return (le32toh(path[depth].ep_index[1].ei_blk));
652 
653 		depth--;
654 	}
655 
656 	return (EXT4_MAX_BLOCKS);
657 }
658 
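/*
 * Write back one modified level of the path.  Interior blocks kept in
 * ep_data are copied into a buffer, checksummed and written synchronously;
 * the root lives in the inode itself, so it is flushed with ext2_update().
 */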
659 static int
660 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
661 {
662 	struct m_ext2fs *fs;
663 	struct buf *bp;
664 	uint64_t blk;
665 	int error;
666 
667 	fs = ip->i_e2fs;
668 
669 	if (!path)
670 		return (EINVAL);
671 
672 	if (path->ep_data) {
673 		blk = path->ep_blk;
674 		bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
675 		    fs->e2fs_bsize, 0, 0, 0);
676 		if (!bp)
677 			return (EIO);
678 		ext4_ext_fill_path_buf(path, bp);
679 		ext2_extent_blk_csum_set(ip, bp->b_data);
680 		error = bwrite(bp);
681 	} else {
682 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
683 		error = ext2_update(ip->i_vnode, 1);
684 	}
685 
686 	return (error);
687 }
688 
689 static int
690 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
691     uint32_t lblk, e4fs_daddr_t blk)
692 {
693 	struct m_ext2fs *fs;
694 	struct ext4_extent_index *idx;
695 	int len;
696 
697 	fs = ip->i_e2fs;
698 
699 	if (lblk == le32toh(path->ep_index->ei_blk)) {
700 		SDT_PROBE2(ext2fs, , trace, extents, 1,
701 		    "lblk == index blk => extent corrupted");
702 		return (EIO);
703 	}
704 
705 	if (le16toh(path->ep_header->eh_ecount) >=
706 	    le16toh(path->ep_header->eh_max)) {
707 		SDT_PROBE2(ext2fs, , trace, extents, 1,
708 		    "ecount > maxcount => extent corrupted");
709 		return (EIO);
710 	}
711 
712 	if (lblk > le32toh(path->ep_index->ei_blk)) {
713 		/* Insert after. */
714 		idx = path->ep_index + 1;
715 	} else {
716 		/* Insert before. */
717 		idx = path->ep_index;
718 	}
719 
720 	len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
721 	if (len > 0)
722 		memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
723 
724 	if (idx > EXT_MAX_INDEX(path->ep_header)) {
725 		SDT_PROBE2(ext2fs, , trace, extents, 1,
726 		    "index is out of range => extent corrupted");
727 		return (EIO);
728 	}
729 
730 	idx->ei_blk = htole32(lblk);
731 	ext4_index_store_pblock(idx, blk);
732 	path->ep_header->eh_ecount =
733 	    htole16(le16toh(path->ep_header->eh_ecount) + 1);
734 
735 	return (ext4_ext_dirty(ip, path));
736 }
737 
738 static e4fs_daddr_t
739 ext4_ext_alloc_meta(struct inode *ip)
740 {
741 	e4fs_daddr_t blk = ext2_alloc_meta(ip);
742 	if (blk) {
743 		ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
744 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
745 		ext2_update(ip->i_vnode, 1);
746 	}
747 
748 	return (blk);
749 }
750 
751 static void
752 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
753 {
754 	struct m_ext2fs *fs;
755 	int i, blocksreleased;
756 
757 	fs = ip->i_e2fs;
758 	blocksreleased = count;
759 
760 	for (i = 0; i < count; i++)
761 		ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
762 
763 	if (ip->i_blocks >= blocksreleased)
764 		ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased);
765 	else
766 		ip->i_blocks = 0;
767 
768 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
769 	ext2_update(ip->i_vnode, 1);
770 }
771 
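/*
 * Split the tree along the path at level "at": allocate a new leaf and new
 * intermediate index blocks, move the records to the right of the split
 * point into them, and hook the new subtree into the parent with
 * ext4_ext_insert_index().  On failure, blocks already allocated here are
 * freed again.
 */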
772 static int
773 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
774     struct ext4_extent *newext, int at)
775 {
776 	struct m_ext2fs *fs;
777 	struct  buf *bp;
778 	int depth = ext4_ext_inode_depth(ip);
779 	struct ext4_extent_header *neh;
780 	struct ext4_extent_index *fidx;
781 	struct ext4_extent *ex;
782 	int i = at, k, m, a;
783 	e4fs_daddr_t newblk, oldblk;
784 	uint32_t border;
785 	e4fs_daddr_t *ablks = NULL;
786 	int error = 0;
787 
788 	fs = ip->i_e2fs;
789 	bp = NULL;
790 
791 	/*
792 	 * We will split at current extent for now.
793 	 */
794 	if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
795 		SDT_PROBE2(ext2fs, , trace, extents, 1,
796 		    "extent is out of range => extent corrupted");
797 		return (EIO);
798 	}
799 
800 	if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
801 		border = le32toh(path[depth].ep_ext[1].e_blk);
802 	else
803 		border = le32toh(newext->e_blk);
804 
805 	/* Allocate new blocks. */
806 	ablks = malloc(sizeof(e4fs_daddr_t) * depth,
807 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
808 	for (a = 0; a < depth - at; a++) {
809 		newblk = ext4_ext_alloc_meta(ip);
810 		if (newblk == 0) {
811 			error = ENOSPC;
			goto cleanup;
		}
812 		ablks[a] = newblk;
813 	}
814 
815 	newblk = ablks[--a];
816 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
817 	if (!bp) {
818 		error = EIO;
819 		goto cleanup;
820 	}
821 
822 	neh = ext4_ext_block_header(bp->b_data);
823 	neh->eh_ecount = 0;
824 	neh->eh_max = htole16(ext4_ext_space_block(ip));
825 	neh->eh_magic = htole16(EXT4_EXT_MAGIC);
826 	neh->eh_depth = 0;
827 	ex = EXT_FIRST_EXTENT(neh);
828 
829 	if (le16toh(path[depth].ep_header->eh_ecount) !=
830 	    le16toh(path[depth].ep_header->eh_max)) {
831 		SDT_PROBE2(ext2fs, , trace, extents, 1,
832 		    "extents count out of range => extent corrupted");
833 		error = EIO;
834 		goto cleanup;
835 	}
836 
837 	/* Start copy from next extent. */
838 	m = 0;
839 	path[depth].ep_ext++;
840 	while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
841 		path[depth].ep_ext++;
842 		m++;
843 	}
844 	if (m) {
845 		memmove(ex, path[depth].ep_ext - m,
846 		    sizeof(struct ext4_extent) * m);
847 		neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
848 	}
849 
850 	ext2_extent_blk_csum_set(ip, bp->b_data);
851 	bwrite(bp);
852 	bp = NULL;
853 
854 	/* Fix old leaf. */
855 	if (m) {
856 		path[depth].ep_header->eh_ecount =
857 		    htole16(le16toh(path[depth].ep_header->eh_ecount) - m);
858 		ext4_ext_dirty(ip, path + depth);
859 	}
860 
861 	/* Create intermediate indexes. */
862 	k = depth - at - 1;
863 	KASSERT(k >= 0, ("ext4_ext_split: negative k"));
864 
865 	/* Insert new index into current index block. */
866 	i = depth - 1;
867 	while (k--) {
868 		oldblk = newblk;
869 		newblk = ablks[--a];
870 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
871 		    (int)fs->e2fs_bsize, NOCRED, &bp);
872 		if (error) {
873 			goto cleanup;
874 		}
875 
876 		neh = (struct ext4_extent_header *)bp->b_data;
877 		neh->eh_ecount = htole16(1);
878 		neh->eh_magic = htole16(EXT4_EXT_MAGIC);
879 		neh->eh_max = htole16(ext4_ext_space_block_index(ip));
880 		neh->eh_depth = htole16(depth - i);
881 		fidx = EXT_FIRST_INDEX(neh);
882 		fidx->ei_blk = htole32(border);
883 		ext4_index_store_pblock(fidx, oldblk);
884 
885 		m = 0;
886 		path[i].ep_index++;
887 		while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
888 			path[i].ep_index++;
889 			m++;
890 		}
891 		if (m) {
892 			memmove(++fidx, path[i].ep_index - m,
893 			    sizeof(struct ext4_extent_index) * m);
894 			neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
895 		}
896 
897 		ext2_extent_blk_csum_set(ip, bp->b_data);
898 		bwrite(bp);
899 		bp = NULL;
900 
901 		/* Fix old index. */
902 		if (m) {
903 			path[i].ep_header->eh_ecount =
904 			    htole16(le16toh(path[i].ep_header->eh_ecount) - m);
905 			ext4_ext_dirty(ip, path + i);
906 		}
907 
908 		i--;
909 	}
910 
911 	error = ext4_ext_insert_index(ip, path + at, border, newblk);
912 
913 cleanup:
914 	if (bp)
915 		brelse(bp);
916 
917 	if (error) {
918 		for (i = 0; i < depth; i++) {
919 			if (!ablks[i])
920 				continue;
921 			ext4_ext_blkfree(ip, ablks[i], 1, 0);
922 		}
923 	}
924 
925 	free(ablks, M_EXT2EXTENTS);
926 
927 	return (error);
928 }
929 
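/*
 * Grow the tree by one level: copy the current root from the inode into a
 * newly allocated block and turn the in-inode root into a single index
 * entry that points at that block.
 */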
930 static int
931 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
932     struct ext4_extent *newext)
933 {
934 	struct m_ext2fs *fs;
935 	struct ext4_extent_path *curpath;
936 	struct ext4_extent_header *neh;
937 	struct buf *bp;
938 	e4fs_daddr_t newblk;
939 	int error = 0;
940 
941 	fs = ip->i_e2fs;
942 	curpath = path;
943 
944 	newblk = ext4_ext_alloc_meta(ip);
945 	if (newblk == 0)
946 		return (ENOSPC);
947 
948 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
949 	if (!bp)
950 		return (EIO);
951 
952 	/* Move top-level index/leaf into new block. */
953 	memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
954 
955 	/* Set size of new block */
956 	neh = ext4_ext_block_header(bp->b_data);
957 	neh->eh_magic = htole16(EXT4_EXT_MAGIC);
958 
959 	if (ext4_ext_inode_depth(ip))
960 		neh->eh_max = htole16(ext4_ext_space_block_index(ip));
961 	else
962 		neh->eh_max = htole16(ext4_ext_space_block(ip));
963 
964 	ext2_extent_blk_csum_set(ip, bp->b_data);
965 	error = bwrite(bp);
966 	bp = NULL;
967 	if (error)
968 		goto out;
969 
970 
971 	curpath->ep_header->eh_magic = htole16(EXT4_EXT_MAGIC);
972 	curpath->ep_header->eh_max = htole16(ext4_ext_space_root(ip));
973 	curpath->ep_header->eh_ecount = htole16(1);
974 	curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
975 	curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
976 	ext4_index_store_pblock(curpath->ep_index, newblk);
977 
978 	neh = ext4_ext_inode_header(ip);
979 	neh->eh_depth = htole16(path->ep_depth + 1);
980 	ext4_ext_dirty(ip, curpath);
981 out:
982 	brelse(bp);
983 
984 	return (error);
985 }
986 
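/*
 * Make room for a new extent when the target leaf is full, either by
 * splitting at the lowest level that still has a free index slot or, when
 * the whole path is full, by growing the tree in depth first.
 */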
987 static int
988 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
989     struct ext4_extent *newext)
990 {
991 	struct ext4_extent_path *curpath;
992 	int depth, i, error;
993 
994 repeat:
995 	i = depth = ext4_ext_inode_depth(ip);
996 
997 	/* Look for a free index entry in the tree. */
998 	curpath = path + depth;
999 	while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
1000 		i--;
1001 		curpath--;
1002 	}
1003 
1004 	/*
1005 	 * We use an already-allocated block for the index block,
1006 	 * so subsequent data blocks should be contiguous.
1007 	 */
1008 	if (EXT_HAS_FREE_INDEX(curpath)) {
1009 		error = ext4_ext_split(ip, path, newext, i);
1010 		if (error)
1011 			goto out;
1012 
1013 		/* Refill path. */
1014 		ext4_ext_drop_refs(path);
1015 		error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1016 		if (error)
1017 			goto out;
1018 	} else {
1019 		/* Tree is full; grow it in depth. */
1020 		error = ext4_ext_grow_indepth(ip, path, newext);
1021 		if (error)
1022 			goto out;
1023 
1024 		/* Refill path. */
1025 		ext4_ext_drop_refs(path);
1026 		error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1027 		if (error)
1028 			goto out;
1029 
1030 		/* Check and split tree if required. */
1031 		depth = ext4_ext_inode_depth(ip);
1032 		if (le16toh(path[depth].ep_header->eh_ecount) ==
1033 		    le16toh(path[depth].ep_header->eh_max))
1034 			goto repeat;
1035 	}
1036 
1037 out:
1038 	return (error);
1039 }
1040 
1041 static int
1042 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
1043 {
1044 	struct ext4_extent_header *eh;
1045 	struct ext4_extent *ex;
1046 	int32_t border;
1047 	int depth, k;
1048 
1049 	depth = ext4_ext_inode_depth(ip);
1050 	eh = path[depth].ep_header;
1051 	ex = path[depth].ep_ext;
1052 
1053 	if (ex == NULL || eh == NULL)
1054 		return (EIO);
1055 
1056 	if (!depth)
1057 		return (0);
1058 
1059 	/* Correct the tree only if the leaf's first extent was modified. */
1060 	if (ex != EXT_FIRST_EXTENT(eh))
1061 		return (0);
1062 
1063 	k = depth - 1;
1064 	border = le32toh(path[depth].ep_ext->e_blk);
1065 	path[k].ep_index->ei_blk = htole32(border);
1066 	ext4_ext_dirty(ip, path + k);
1067 	while (k--) {
1068 		/* Change all left-side indexes. */
1069 		if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1070 			break;
1071 
1072 		path[k].ep_index->ei_blk = htole32(border);
1073 		ext4_ext_dirty(ip, path + k);
1074 	}
1075 
1076 	return (0);
1077 }
1078 
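/*
 * Insert newext into the leaf located by path.  The extent is merged with
 * a neighbour when it is logically and physically contiguous; otherwise a
 * slot is opened in the leaf, and a full leaf is first split or the tree
 * grown via ext4_ext_create_new_leaf().
 */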
1079 static int
1080 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1081     struct ext4_extent *newext)
1082 {
1083 	struct ext4_extent_header * eh;
1084 	struct ext4_extent *ex, *nex, *nearex;
1085 	struct ext4_extent_path *npath;
1086 	int depth, len, error, next;
1087 
1088 	depth = ext4_ext_inode_depth(ip);
1089 	ex = path[depth].ep_ext;
1090 	npath = NULL;
1091 
1092 	if (le16toh(newext->e_len) == 0 || path[depth].ep_header == NULL)
1093 		return (EINVAL);
1094 
1095 	/* Insert block into found extent. */
1096 	if (ex && ext4_can_extents_be_merged(ex, newext)) {
1097 		ex->e_len = htole16(le16toh(ex->e_len) + le16toh(newext->e_len));
1098 		eh = path[depth].ep_header;
1099 		nearex = ex;
1100 		goto merge;
1101 	}
1102 
1103 repeat:
1104 	depth = ext4_ext_inode_depth(ip);
1105 	eh = path[depth].ep_header;
1106 	if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max))
1107 		goto has_space;
1108 
1109 	/* Try next leaf */
1110 	nex = EXT_LAST_EXTENT(eh);
1111 	next = ext4_ext_next_leaf_block(ip, path);
1112 	if (le32toh(newext->e_blk) > le32toh(nex->e_blk) && next !=
1113 	    EXT4_MAX_BLOCKS) {
1114 		KASSERT(npath == NULL,
1115 		    ("ext4_ext_insert_extent: bad path"));
1116 
1117 		error = ext4_ext_find_extent(ip, next, &npath);
1118 		if (error)
1119 			goto cleanup;
1120 
1121 		if (npath->ep_depth != path->ep_depth) {
1122 			error = EIO;
1123 			goto cleanup;
1124 		}
1125 
1126 		eh = npath[depth].ep_header;
1127 		if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) {
1128 			path = npath;
1129 			goto repeat;
1130 		}
1131 	}
1132 
1133 	/*
1134 	 * There is no free space in the found leaf,
1135 	 * try to add a new leaf to the tree.
1136 	 */
1137 	error = ext4_ext_create_new_leaf(ip, path, newext);
1138 	if (error)
1139 		goto cleanup;
1140 
1141 	depth = ext4_ext_inode_depth(ip);
1142 	eh = path[depth].ep_header;
1143 
1144 has_space:
1145 	nearex = path[depth].ep_ext;
1146 	if (!nearex) {
1147 		/* Create new extent in the leaf. */
1148 		path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1149 	} else if (le32toh(newext->e_blk) > le32toh(nearex->e_blk)) {
1150 		if (nearex != EXT_LAST_EXTENT(eh)) {
1151 			len = EXT_MAX_EXTENT(eh) - nearex;
1152 			len = (len - 1) * sizeof(struct ext4_extent);
1153 			len = len < 0 ? 0 : len;
1154 			memmove(nearex + 2, nearex + 1, len);
1155 		}
1156 		path[depth].ep_ext = nearex + 1;
1157 	} else {
1158 		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1159 		len = len < 0 ? 0 : len;
1160 		memmove(nearex + 1, nearex, len);
1161 		path[depth].ep_ext = nearex;
1162 	}
1163 
1164 	eh->eh_ecount = htole16(le16toh(eh->eh_ecount) + 1);
1165 	nearex = path[depth].ep_ext;
1166 	nearex->e_blk = newext->e_blk;
1167 	nearex->e_start_lo = newext->e_start_lo;
1168 	nearex->e_start_hi = newext->e_start_hi;
1169 	nearex->e_len = newext->e_len;
1170 
1171 merge:
1172 	/* Try to merge extents to the right. */
1173 	while (nearex < EXT_LAST_EXTENT(eh)) {
1174 		if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1175 			break;
1176 
1177 		/* Merge with next extent. */
1178 		nearex->e_len = htole16(le16toh(nearex->e_len) +
1179 		    le16toh(nearex[1].e_len));
1180 		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1181 			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1182 			    sizeof(struct ext4_extent);
1183 			memmove(nearex + 1, nearex + 2, len);
1184 		}
1185 
1186 		eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1187 		KASSERT(le16toh(eh->eh_ecount) != 0,
1188 		    ("ext4_ext_insert_extent: bad ecount"));
1189 	}
1190 
1191 	/*
1192 	 * Try to merge extents to the left,
1193 	 * starting with index correction.
1194 	 */
1195 	error = ext4_ext_correct_indexes(ip, path);
1196 	if (error)
1197 		goto cleanup;
1198 
1199 	ext4_ext_dirty(ip, path + depth);
1200 
1201 cleanup:
1202 	if (npath) {
1203 		ext4_ext_drop_refs(npath);
1204 		free(npath, M_EXT2EXTENTS);
1205 	}
1206 
1207 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1208 	return (error);
1209 }
1210 
1211 static e4fs_daddr_t
1212 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1213     struct ucred *cred, unsigned long *count, int *perror)
1214 {
1215 	struct m_ext2fs *fs;
1216 	e4fs_daddr_t newblk;
1217 
1218 	/*
1219 	 * We will allocate only a single block for now.
1220 	 */
1221 	if (*count > 1)
1222 		return (0);
1223 
1224 	fs = ip->i_e2fs;
1225 	EXT2_LOCK(ip->i_ump);
1226 	*perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1227 	if (*perror)
1228 		return (0);
1229 
1230 	if (newblk) {
1231 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
1232 		ext2_update(ip->i_vnode, 1);
1233 	}
1234 
1235 	return (newblk);
1236 }
1237 
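/*
 * Map logical block iblk of an inode to a physical block.  The extent
 * cache and then the tree are consulted first; if the block is not yet
 * allocated, a new block is obtained, recorded as a fresh extent and
 * *pallocated is set.  On request the corresponding buffer is returned
 * through *bpp and the physical block number through *nb.
 */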
1238 int
1239 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1240     unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1241     int *pallocated, daddr_t *nb)
1242 {
1243 	struct m_ext2fs *fs;
1244 	struct buf *bp = NULL;
1245 	struct ext4_extent_path *path;
1246 	struct ext4_extent newex, *ex;
1247 	e4fs_daddr_t bpref, newblk = 0;
1248 	unsigned long allocated = 0;
1249 	int error = 0, depth;
1250 
1251 	if (bpp)
1252 		*bpp = NULL;
1253 	*pallocated = 0;
1254 
1255 	/* Check cache. */
1256 	path = NULL;
1257 	if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1258 		if (bpref == EXT4_EXT_CACHE_IN) {
1259 			/* Block is already allocated. */
1260 			newblk = iblk - le32toh(newex.e_blk) +
1261 			    ext4_ext_extent_pblock(&newex);
1262 			allocated = le16toh(newex.e_len) - (iblk - le32toh(newex.e_blk));
1263 			goto out;
1264 		} else {
1265 			error = EIO;
1266 			goto out2;
1267 		}
1268 	}
1269 
1270 	error = ext4_ext_find_extent(ip, iblk, &path);
1271 	if (error) {
1272 		goto out2;
1273 	}
1274 
1275 	depth = ext4_ext_inode_depth(ip);
1276 	if (path[depth].ep_ext == NULL && depth != 0) {
1277 		error = EIO;
1278 		goto out2;
1279 	}
1280 
1281 	if ((ex = path[depth].ep_ext)) {
1282 		uint64_t lblk = le32toh(ex->e_blk);
1283 		uint16_t e_len  = le16toh(ex->e_len);
1284 		e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1285 
1286 		if (e_len > EXT4_MAX_LEN)
1287 			goto out2;
1288 
1289 		/* If the found extent covers the block, simply return it. */
1290 		if (iblk >= lblk && iblk < lblk + e_len) {
1291 			newblk = iblk - lblk + e_start;
1292 			allocated = e_len - (iblk - lblk);
1293 			ext4_ext_put_in_cache(ip, lblk, e_len,
1294 			    e_start, EXT4_EXT_CACHE_IN);
1295 			goto out;
1296 		}
1297 	}
1298 
1299 	/* Allocate the new block. */
1300 	if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1301 		ip->i_next_alloc_goal = 0;
1302 	}
1303 
1304 	bpref = ext4_ext_blkpref(ip, path, iblk);
1305 	allocated = max_blocks;
1306 	newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1307 	if (!newblk)
1308 		goto out2;
1309 
1310 	/* Try to insert new extent into found leaf and return. */
1311 	newex.e_blk = htole32(iblk);
1312 	ext4_ext_store_pblock(&newex, newblk);
1313 	newex.e_len = htole16(allocated);
1314 	error = ext4_ext_insert_extent(ip, path, &newex);
1315 	if (error)
1316 		goto out2;
1317 
1318 	newblk = ext4_ext_extent_pblock(&newex);
1319 	ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1320 	*pallocated = 1;
1321 
1322 out:
1323 	if (allocated > max_blocks)
1324 		allocated = max_blocks;
1325 
1326 	if (bpp)
1327 	{
1328 		fs = ip->i_e2fs;
1329 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1330 		    fs->e2fs_bsize, cred, &bp);
1331 		if (error) {
1332 			brelse(bp);
1333 		} else {
1334 			*bpp = bp;
1335 		}
1336 	}
1337 
1338 out2:
1339 	if (path) {
1340 		ext4_ext_drop_refs(path);
1341 		free(path, M_EXT2EXTENTS);
1342 	}
1343 
1344 	if (nb)
1345 		*nb = newblk;
1346 
1347 	return (error);
1348 }
1349 
1350 static inline uint16_t
1351 ext4_ext_get_actual_len(struct ext4_extent *ext)
1352 {
1353 
1354 	return (le16toh(ext->e_len) <= EXT_INIT_MAX_LEN ?
1355 	    le16toh(ext->e_len) : (le16toh(ext->e_len) - EXT_INIT_MAX_LEN));
1356 }
1357 
1358 static inline struct ext4_extent_header *
1359 ext4_ext_header(struct inode *ip)
1360 {
1361 
1362 	return ((struct ext4_extent_header *)ip->i_db);
1363 }
1364 
1365 static int
1366 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1367     unsigned long from, unsigned long to)
1368 {
1369 	unsigned long num, start;
1370 
1371 	if (from >= le32toh(ex->e_blk) &&
1372 	    to == le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - 1) {
1373 		/* Tail cleanup. */
1374 		num = le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - from;
1375 		start = ext4_ext_extent_pblock(ex) +
1376 		    ext4_ext_get_actual_len(ex) - num;
1377 		ext4_ext_blkfree(ip, start, num, 0);
1378 	}
1379 
1380 	return (0);
1381 }
1382 
1383 static int
1384 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1385 {
1386 	e4fs_daddr_t leaf;
1387 
1388 	/* Free index block. */
1389 	path--;
1390 	leaf = ext4_ext_index_pblock(path->ep_index);
1391 	KASSERT(path->ep_header->eh_ecount != 0,
1392 	    ("ext4_ext_rm_index: bad ecount"));
1393 	path->ep_header->eh_ecount =
1394 	    htole16(le16toh(path->ep_header->eh_ecount) - 1);
1395 	ext4_ext_dirty(ip, path);
1396 	ext4_ext_blkfree(ip, leaf, 1, 0);
1397 	return (0);
1398 }
1399 
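/*
 * Remove extents from a leaf, walking backwards from the last extent while
 * extents still reach past "start".  Only whole-extent removal and tail
 * truncation are handled; an emptied leaf is unlinked from the index block
 * above it.
 */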
1400 static int
1401 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1402     uint64_t start)
1403 {
1404 	struct ext4_extent_header *eh;
1405 	struct ext4_extent *ex;
1406 	unsigned int a, b, block, num;
1407 	unsigned long ex_blk;
1408 	unsigned short ex_len;
1409 	int depth;
1410 	int error, correct_index;
1411 
1412 	depth = ext4_ext_inode_depth(ip);
1413 	if (!path[depth].ep_header) {
1414 		if (path[depth].ep_data == NULL)
1415 			return (EINVAL);
1416 		path[depth].ep_header =
1417 		    (struct ext4_extent_header *)path[depth].ep_data;
1418 	}
1419 
1420 	eh = path[depth].ep_header;
1421 	if (!eh) {
1422 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1423 		    "bad header => extent corrupted");
1424 		return (EIO);
1425 	}
1426 
1427 	ex = EXT_LAST_EXTENT(eh);
1428 	ex_blk = le32toh(ex->e_blk);
1429 	ex_len = ext4_ext_get_actual_len(ex);
1430 
1431 	error = 0;
1432 	correct_index = 0;
1433 	while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1434 		path[depth].ep_ext = ex;
1435 		a = ex_blk > start ? ex_blk : start;
1436 		b = (uint64_t)ex_blk + ex_len - 1 <
1437 		    EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1438 
1439 		if (a != ex_blk && b != ex_blk + ex_len - 1)
1440 			return (EINVAL);
1441 		else if (a != ex_blk) {
1442 			/* Remove tail of the extent. */
1443 			block = ex_blk;
1444 			num = a - block;
1445 		} else if (b != ex_blk + ex_len - 1) {
1446 			/* Remove head of the extent, not implemented. */
1447 			return (EINVAL);
1448 		} else {
1449 			/* Remove whole extent. */
1450 			block = ex_blk;
1451 			num = 0;
1452 		}
1453 
1454 		if (ex == EXT_FIRST_EXTENT(eh))
1455 			correct_index = 1;
1456 
1457 		error = ext4_remove_blocks(ip, ex, a, b);
1458 		if (error)
1459 			goto out;
1460 
1461 		if (num == 0) {
1462 			ext4_ext_store_pblock(ex, 0);
1463 			eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1464 		}
1465 
1466 		ex->e_blk = htole32(block);
1467 		ex->e_len = htole16(num);
1468 
1469 		ext4_ext_dirty(ip, path + depth);
1470 
1471 		ex--;
1472 		ex_blk = le32toh(ex->e_blk);
1473 		ex_len = ext4_ext_get_actual_len(ex);
1474 	}
1475 
1476 	if (correct_index && le16toh(eh->eh_ecount))
1477 		error = ext4_ext_correct_indexes(ip, path);
1478 
1479 	/*
1480 	 * If this leaf is now empty, we should
1481 	 * remove it from the index block above.
1482 	 */
1483 	if (error == 0 && eh->eh_ecount == 0 &&
1484 	    path[depth].ep_data != NULL)
1485 		error = ext4_ext_rm_index(ip, path + depth);
1486 
1487 out:
1488 	return (error);
1489 }
1490 
1491 static struct buf *
1492 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1493     int depth, int flags)
1494 {
1495 	struct m_ext2fs *fs;
1496 	struct ext4_extent_header *eh;
1497 	struct buf *bp;
1498 	int error;
1499 
1500 	fs = ip->i_e2fs;
1501 	error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1502 	    fs->e2fs_bsize, NOCRED, &bp);
1503 	if (error) {
1504 		return (NULL);
1505 	}
1506 
1507 	eh = ext4_ext_block_header(bp->b_data);
1508 	if (le16toh(eh->eh_depth) != depth) {
1509 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1510 		    "unexpected eh_depth");
1511 		goto err;
1512 	}
1513 
1514 	error = ext4_ext_check_header(ip, eh);
1515 	if (error)
1516 		goto err;
1517 
1518 	return (bp);
1519 
1520 err:
1521 	brelse(bp);
1522 	return (NULL);
1523 
1524 }
1525 
1526 static int inline
1527 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1528 {
1529 
1530 	KASSERT(path->ep_index != NULL,
1531 	    ("ext4_ext_more_to_rm: bad index from path"));
1532 
1533 	if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1534 		return (0);
1535 
1536 	if (le16toh(path->ep_header->eh_ecount) == path->index_count)
1537 		return (0);
1538 
1539 	return (1);
1540 }
1541 
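/*
 * Truncate the extent tree so that it no longer maps logical blocks at or
 * beyond "length".  The tree is traversed iteratively, using the path
 * array as an explicit stack: leaves are trimmed with ext4_ext_rm_leaf()
 * and index nodes that become empty are removed on the way back up.
 */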
1542 int
1543 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1544     struct ucred *cred, struct thread *td)
1545 {
1546 	struct buf *bp;
1547 	struct ext4_extent_header *ehp;
1548 	struct ext4_extent_path *path;
1549 	int depth;
1550 	int i, error;
1551 
1552 	ehp = (struct ext4_extent_header *)ip->i_db;
1553 	depth = ext4_ext_inode_depth(ip);
1554 
1555 	error = ext4_ext_check_header(ip, ehp);
1556 	if (error)
1557 		return (error);
1558 
1559 	path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1560 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1561 	path[0].ep_header = ehp;
1562 	path[0].ep_depth = depth;
1563 	i = 0;
1564 	while (error == 0 && i >= 0) {
1565 		if (i == depth) {
1566 			/* This is a leaf. */
1567 			error = ext4_ext_rm_leaf(ip, path, length);
1568 			if (error)
1569 				break;
1570 			free(path[i].ep_data, M_EXT2EXTENTS);
1571 			path[i].ep_data = NULL;
1572 			i--;
1573 			continue;
1574 		}
1575 
1576 		/* This is index. */
1577 		/* This is an index node. */
1578 			path[i].ep_header =
1579 			    (struct ext4_extent_header *)path[i].ep_data;
1580 
1581 		if (!path[i].ep_index) {
1582 			/* This level hasn't been touched yet. */
1583 			path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1584 			path[i].index_count =
1585 			    le16toh(path[i].ep_header->eh_ecount) + 1;
1586 		} else {
1587 			/* We have already been here; look at the next index. */
1588 			path[i].ep_index--;
1589 		}
1590 
1591 		if (ext4_ext_more_to_rm(path + i)) {
1592 			memset(path + i + 1, 0, sizeof(*path));
1593 			bp = ext4_read_extent_tree_block(ip,
1594 			    ext4_ext_index_pblock(path[i].ep_index),
1595 			    path[0].ep_depth - (i + 1), 0);
1596 			if (!bp) {
1597 				error = EIO;
1598 				break;
1599 			}
1600 
1601 			ext4_ext_fill_path_bdata(&path[i+1], bp,
1602 			    ext4_ext_index_pblock(path[i].ep_index));
1603 			brelse(bp);
1604 			path[i].index_count =
1605 			    le16toh(path[i].ep_header->eh_ecount);
1606 			i++;
1607 		} else {
1608 			if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1609 				/* Index is empty, remove it. */
1610 				error = ext4_ext_rm_index(ip, path + i);
1611 			}
1612 			free(path[i].ep_data, M_EXT2EXTENTS);
1613 			path[i].ep_data = NULL;
1614 			i--;
1615 		}
1616 	}
1617 
1618 	if (path->ep_header->eh_ecount == 0) {
1619 		/*
1620 		 * Truncate the tree to zero.
1621 		 */
1622 		 ext4_ext_header(ip)->eh_depth = 0;
1623 		 ext4_ext_header(ip)->eh_max = htole16(ext4_ext_space_root(ip));
1624 		 ext4_ext_dirty(ip, path);
1625 	}
1626 
1627 	ext4_ext_drop_refs(path);
1628 	free(path, M_EXT2EXTENTS);
1629 
1630 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1631 	return (error);
1632 }
1633