xref: /freebsd/sys/fs/ext2fs/ext2_extents.c (revision 84b89556)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
37 #include <sys/bio.h>
38 #include <sys/buf.h>
39 #include <sys/conf.h>
40 #include <sys/sdt.h>
41 #include <sys/stat.h>
42 
43 #include <fs/ext2fs/ext2_mount.h>
44 #include <fs/ext2fs/fs.h>
45 #include <fs/ext2fs/inode.h>
46 #include <fs/ext2fs/ext2fs.h>
47 #include <fs/ext2fs/ext2_extents.h>
48 #include <fs/ext2fs/ext2_extern.h>
49 
50 SDT_PROVIDER_DECLARE(ext2fs);
51 /*
52  * ext2fs trace probe:
53  * arg0: verbosity. Higher numbers give more verbose messages
54  * arg1: Textual message
55  */
56 SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*");
57 
58 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
59 
60 #ifdef EXT2FS_PRINT_EXTENTS
61 static void
62 ext4_ext_print_extent(struct ext4_extent *ep)
63 {
64 
65 	printf("    ext %p => (blk %u len %u start %ju)\n",
66 	    ep, ep->e_blk, ep->e_len,
67 	    (uint64_t)ep->e_start_hi << 32 | ep->e_start_lo);
68 }
69 
70 static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp);
71 
72 static void
73 ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk)
74 {
75 	struct m_ext2fs *fs;
76 	struct buf *bp;
77 	int error;
78 
79 	fs = ip->i_e2fs;
80 
81 	printf("    index %p => (blk %u pblk %ju)\n",
82 	    ex, ex->ei_blk, (uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo);
83 
84 	if (!do_walk)
85 		return;
86 
87 	if ((error = bread(ip->i_devvp,
88 	    fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)),
89 	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
90 		brelse(bp);
91 		return;
92 	}
93 
94 	ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data);
95 
96 	brelse(bp);
97 
98 }
99 
100 static void
101 ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp)
102 {
103 	int i;
104 
105 	printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n",
106 	    ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth,
107 	    ehp->eh_gen);
108 
109 	for (i = 0; i < ehp->eh_ecount; i++)
110 		if (ehp->eh_depth != 0)
111 			ext4_ext_print_index(ip,
112 			    (struct ext4_extent_index *)(ehp + 1 + i), 1);
113 		else
114 			ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i));
115 }
116 
117 static void
118 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
119 {
120 	int k, l;
121 
122 	l = path->ep_depth;
123 
124 	printf("ip=%ju, Path:\n", ip->i_number);
125 	for (k = 0; k <= l; k++, path++) {
126 		if (path->ep_index) {
127 			ext4_ext_print_index(ip, path->ep_index, 0);
128 		} else if (path->ep_ext) {
129 			ext4_ext_print_extent(path->ep_ext);
130 		}
131 	}
132 }
133 
134 void
135 ext4_ext_print_extent_tree_status(struct inode *ip)
136 {
137 	struct ext4_extent_header *ehp;
138 
139 	ehp = (struct ext4_extent_header *)(char *)ip->i_db;
140 
141 	printf("Extent status: ip=%ju\n", ip->i_number);
142 	if (!(ip->i_flag & IN_E4EXTENTS))
143 		return;
144 
145 	ext4_ext_print_header(ip, ehp);
146 
147 	return;
148 }
149 #endif
150 
151 static inline struct ext4_extent_header *
152 ext4_ext_inode_header(struct inode *ip)
153 {
154 
155 	return ((struct ext4_extent_header *)ip->i_db);
156 }
157 
158 static inline struct ext4_extent_header *
159 ext4_ext_block_header(char *bdata)
160 {
161 
162 	return ((struct ext4_extent_header *)bdata);
163 }
164 
165 static inline unsigned short
166 ext4_ext_inode_depth(struct inode *ip)
167 {
168 	struct ext4_extent_header *ehp;
169 
170 	ehp = (struct ext4_extent_header *)ip->i_data;
171 	return (ehp->eh_depth);
172 }
173 
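/*
 * On-disk extents and index entries store 48-bit physical block numbers
 * split into a 32-bit low half and a 16-bit high half; the helpers below
 * assemble and store such block numbers.
 */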
174 static inline e4fs_daddr_t
175 ext4_ext_index_pblock(struct ext4_extent_index *index)
176 {
177 	e4fs_daddr_t blk;
178 
179 	blk = index->ei_leaf_lo;
180 	blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32;
181 
182 	return (blk);
183 }
184 
185 static inline void
186 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
187 {
188 
189 	index->ei_leaf_lo = pb & 0xffffffff;
190 	index->ei_leaf_hi = (pb >> 32) & 0xffff;
191 }
192 
193 
194 static inline e4fs_daddr_t
195 ext4_ext_extent_pblock(struct ext4_extent *extent)
196 {
197 	e4fs_daddr_t blk;
198 
199 	blk = extent->e_start_lo;
200 	blk |= (e4fs_daddr_t)extent->e_start_hi << 32;
201 
202 	return (blk);
203 }
204 
205 static inline void
206 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
207 {
208 
209 	ex->e_start_lo = pb & 0xffffffff;
210 	ex->e_start_hi = (pb >> 32) & 0xffff;
211 }
212 
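/*
 * Look up lbn in the inode's one-entry extent cache.  If the cached extent
 * covers lbn, fill in *ep and return the cached type; otherwise return
 * EXT4_EXT_CACHE_NO.
 */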
213 int
214 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
215 {
216 	struct ext4_extent_cache *ecp;
217 	int ret = EXT4_EXT_CACHE_NO;
218 
219 	ecp = &ip->i_ext_cache;
220 	if (ecp->ec_type == EXT4_EXT_CACHE_NO)
221 		return (ret);
222 
223 	if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
224 		ep->e_blk = ecp->ec_blk;
225 		ep->e_start_lo = ecp->ec_start & 0xffffffff;
226 		ep->e_start_hi = ecp->ec_start >> 32 & 0xffff;
227 		ep->e_len = ecp->ec_len;
228 		ret = ecp->ec_type;
229 	}
230 	return (ret);
231 }
232 
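/*
 * Sanity-check an extent header: verify the magic number, a non-zero
 * eh_max and an entry count within bounds.  On failure, fire the trace
 * probe and return EIO.
 */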
233 static int
234 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
235 {
236 	struct m_ext2fs *fs;
237 	char *error_msg;
238 
239 	fs = ip->i_e2fs;
240 
241 	if (eh->eh_magic != EXT4_EXT_MAGIC) {
242 		error_msg = "header: invalid magic";
243 		goto corrupted;
244 	}
245 	if (eh->eh_max == 0) {
246 		error_msg = "header: invalid eh_max";
247 		goto corrupted;
248 	}
249 	if (eh->eh_ecount > eh->eh_max) {
250 		error_msg = "header: invalid eh_entries";
251 		goto corrupted;
252 	}
253 
254 	return (0);
255 
256 corrupted:
257 	SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg);
258 	return (EIO);
259 }
260 
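/*
 * Binary-search the index entries of the node in "path" and set ep_index
 * to the last entry whose logical block is <= blk.
 */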
261 static void
262 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
263 {
264 	struct ext4_extent_header *eh;
265 	struct ext4_extent_index *r, *l, *m;
266 
267 	eh = path->ep_header;
268 
269 	KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0,
270 	    ("ext4_ext_binsearch_index: bad args"));
271 
272 	l = EXT_FIRST_INDEX(eh) + 1;
273 	r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1;
274 	while (l <= r) {
275 		m = l + (r - l) / 2;
276 		if (blk < m->ei_blk)
277 			r = m - 1;
278 		else
279 			l = m + 1;
280 	}
281 
282 	path->ep_index = l - 1;
283 }
284 
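/*
 * Binary-search the extent entries of the leaf in "path" and set ep_ext
 * to the last extent whose logical block is <= blk.
 */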
285 static void
286 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
287 {
288 	struct ext4_extent_header *eh;
289 	struct ext4_extent *r, *l, *m;
290 
291 	eh = path->ep_header;
292 
293 	KASSERT(eh->eh_ecount <= eh->eh_max,
294 	    ("ext4_ext_binsearch_ext: bad args"));
295 
296 	if (eh->eh_ecount == 0)
297 		return;
298 
299 	l = EXT_FIRST_EXTENT(eh) + 1;
300 	r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1;
301 
302 	while (l <= r) {
303 		m = l + (r - l) / 2;
304 		if (blk < m->e_blk)
305 			r = m - 1;
306 		else
307 			l = m + 1;
308 	}
309 
310 	path->ep_ext = l - 1;
311 }
312 
313 static int
314 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
315     struct buf *bp, uint64_t blk)
316 {
317 
318 	KASSERT(path->ep_data == NULL,
319 	    ("ext4_ext_fill_path_bdata: bad ep_data"));
320 
321 	path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
322 	if (!path->ep_data)
323 		return (ENOMEM);
324 
325 	memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
326 	path->ep_blk = blk;
327 
328 	return (0);
329 }
330 
331 static void
332 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
333 {
334 
335 	KASSERT(path->ep_data != NULL,
336 	    ("ext4_ext_fill_path_buf: bad ep_data"));
337 
338 	memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
339 }
340 
341 static void
342 ext4_ext_drop_refs(struct ext4_extent_path *path)
343 {
344 	int depth, i;
345 
346 	if (!path)
347 		return;
348 
349 	depth = path->ep_depth;
350 	for (i = 0; i <= depth; i++, path++)
351 		if (path->ep_data) {
352 			free(path->ep_data, M_EXT2EXTENTS);
353 			path->ep_data = NULL;
354 		}
355 }
356 
357 void
358 ext4_ext_path_free(struct ext4_extent_path *path)
359 {
360 
361 	if (!path)
362 		return;
363 
364 	ext4_ext_drop_refs(path);
365 	free(path, M_EXT2EXTENTS);
366 }
367 
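/*
 * Walk the extent tree from the root embedded in the inode down to the
 * leaf that should contain "block", validating and caching each visited
 * node in the path array.  On success *ppath describes one tree level per
 * entry; on failure the path is released and *ppath is set to NULL.
 */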
368 int
369 ext4_ext_find_extent(struct inode *ip, daddr_t block,
370     struct ext4_extent_path **ppath)
371 {
372 	struct m_ext2fs *fs;
373 	struct ext4_extent_header *eh;
374 	struct ext4_extent_path *path;
375 	struct buf *bp;
376 	uint64_t blk;
377 	int error, depth, i, ppos, alloc;
378 
379 	fs = ip->i_e2fs;
380 	eh = ext4_ext_inode_header(ip);
381 	depth = ext4_ext_inode_depth(ip);
382 	ppos = 0;
383 	alloc = 0;
384 
385 	error = ext4_ext_check_header(ip, eh);
386 	if (error)
387 		return (error);
388 
389 	if (ppath == NULL)
390 		return (EINVAL);
391 
392 	path = *ppath;
393 	if (path == NULL) {
394 		path = malloc(EXT4_EXT_DEPTH_MAX *
395 		    sizeof(struct ext4_extent_path),
396 		    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
397 		if (!path)
398 			return (ENOMEM);
399 
400 		*ppath = path;
401 		alloc = 1;
402 	}
403 
404 	path[0].ep_header = eh;
405 	path[0].ep_data = NULL;
406 
407 	/* Walk through the tree. */
408 	i = depth;
409 	while (i) {
410 		ext4_ext_binsearch_index(&path[ppos], block);
411 		blk = ext4_ext_index_pblock(path[ppos].ep_index);
412 		path[ppos].ep_depth = i;
413 		path[ppos].ep_ext = NULL;
414 
415 		error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
416 		    ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
417 		if (error) {
418 			brelse(bp);
419 			goto error;
420 		}
421 
422 		ppos++;
423 		if (ppos > depth) {
424 			SDT_PROBE2(ext2fs, , trace, extents, 1,
425 			    "ppos > depth => extent corrupted");
426 			error = EIO;
427 			brelse(bp);
428 			goto error;
429 		}
430 
431 		ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
432 		bqrelse(bp);
433 
434 		eh = ext4_ext_block_header(path[ppos].ep_data);
435 		if (ext4_ext_check_header(ip, eh) ||
436 		    ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
437 			error = EIO;
438 			goto error;
439 		}
440 
441 		path[ppos].ep_header = eh;
442 
443 		i--;
444 	}
445 
446 	error = ext4_ext_check_header(ip, eh);
447 	if (error)
448 		goto error;
449 
450 	/* Find extent. */
451 	path[ppos].ep_depth = i;
452 	path[ppos].ep_header = eh;
453 	path[ppos].ep_ext = NULL;
454 	path[ppos].ep_index = NULL;
455 	ext4_ext_binsearch_ext(&path[ppos], block);
456 	return (0);
457 
458 error:
459 	ext4_ext_drop_refs(path);
460 	if (alloc)
461 		free(path, M_EXT2EXTENTS);
462 
463 	*ppath = NULL;
464 
465 	return (error);
466 }
467 
468 static inline int
469 ext4_ext_space_root(struct inode *ip)
470 {
471 	int size;
472 
473 	size = sizeof(ip->i_data);
474 	size -= sizeof(struct ext4_extent_header);
475 	size /= sizeof(struct ext4_extent);
476 
477 	return (size);
478 }
479 
480 static inline int
481 ext4_ext_space_block(struct inode *ip)
482 {
483 	struct m_ext2fs *fs;
484 	int size;
485 
486 	fs = ip->i_e2fs;
487 
488 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
489 	    sizeof(struct ext4_extent);
490 
491 	return (size);
492 }
493 
494 static inline int
495 ext4_ext_space_block_index(struct inode *ip)
496 {
497 	struct m_ext2fs *fs;
498 	int size;
499 
500 	fs = ip->i_e2fs;
501 
502 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
503 	    sizeof(struct ext4_extent_index);
504 
505 	return (size);
506 }
507 
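/*
 * Initialize an empty extent tree rooted in the inode: clear the block
 * pointer area, write an extent header with the magic number and root
 * capacity, and mark the inode for update.
 */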
508 void
509 ext4_ext_tree_init(struct inode *ip)
510 {
511 	struct ext4_extent_header *ehp;
512 
513 	ip->i_flag |= IN_E4EXTENTS;
514 
515 	memset(ip->i_data, 0, sizeof(ip->i_data));
516 	ehp = (struct ext4_extent_header *)ip->i_data;
517 	ehp->eh_magic = EXT4_EXT_MAGIC;
518 	ehp->eh_max = ext4_ext_space_root(ip);
519 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
520 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
521 	ext2_update(ip->i_vnode, 1);
522 }
523 
524 static inline void
525 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
526 			uint32_t len, uint32_t start, int type)
527 {
528 
529 	KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
530 
531 	ip->i_ext_cache.ec_type = type;
532 	ip->i_ext_cache.ec_blk = blk;
533 	ip->i_ext_cache.ec_len = len;
534 	ip->i_ext_cache.ec_start = start;
535 }
536 
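/*
 * Choose a preferred physical block for a new allocation: near the
 * closest extent in the path if one exists, else the block holding the
 * current tree node, else a block derived from the inode's block group.
 */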
537 static e4fs_daddr_t
538 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
539     e4fs_daddr_t block)
540 {
541 	struct m_ext2fs *fs;
542 	struct ext4_extent *ex;
543 	e4fs_daddr_t bg_start;
544 	int depth;
545 
546 	fs = ip->i_e2fs;
547 
548 	if (path) {
549 		depth = path->ep_depth;
550 		ex = path[depth].ep_ext;
551 		if (ex) {
552 			e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
553 			e2fs_daddr_t blk = ex->e_blk;
554 
555 			if (block > blk)
556 				return (pblk + (block - blk));
557 			else
558 				return (pblk - (blk - block));
559 		}
560 
561 		/* Try to get block from index itself. */
562 		if (path[depth].ep_data)
563 			return (path[depth].ep_blk);
564 	}
565 
566 	/* Use inode's group. */
567 	bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
568 	    fs->e2fs->e2fs_first_dblock;
569 
570 	return (bg_start + block);
571 }
572 
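/*
 * Two extents can be merged when they are logically and physically
 * contiguous and the combined length does not exceed EXT4_MAX_LEN.
 */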
573 static inline int
574 ext4_can_extents_be_merged(struct ext4_extent *ex1,
575     struct ext4_extent *ex2)
576 {
577 
578 	if (ex1->e_blk + ex1->e_len != ex2->e_blk)
579 		return (0);
580 
581 	if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN)
582 		return (0);
583 
584 	if (ext4_ext_extent_pblock(ex1) + ex1->e_len ==
585 	    ext4_ext_extent_pblock(ex2))
586 		return (1);
587 
588 	return (0);
589 }
590 
591 static unsigned
592 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
593 {
594 	int depth = path->ep_depth;
595 
596 	/* Empty tree */
597 	if (depth == 0)
598 		return (EXT4_MAX_BLOCKS);
599 
600 	/* Go to indexes. */
601 	depth--;
602 
603 	while (depth >= 0) {
604 		if (path[depth].ep_index !=
605 		    EXT_LAST_INDEX(path[depth].ep_header))
606 			return (path[depth].ep_index[1].ei_blk);
607 
608 		depth--;
609 	}
610 
611 	return (EXT4_MAX_BLOCKS);
612 }
613 
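/*
 * Write a modified level of the path back to disk.  Levels backed by a
 * tree block are checksummed and written with bwrite(); the root level
 * lives in the inode and is flushed through ext2_update().
 */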
614 static int
615 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
616 {
617 	struct m_ext2fs *fs;
618 	struct buf *bp;
619 	uint64_t blk;
620 	int error;
621 
622 	fs = ip->i_e2fs;
623 
624 	if (!path)
625 		return (EINVAL);
626 
627 	if (path->ep_data) {
628 		blk = path->ep_blk;
629 		bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
630 		    fs->e2fs_bsize, 0, 0, 0);
631 		if (!bp)
632 			return (EIO);
633 		ext4_ext_fill_path_buf(path, bp);
634 		ext2_extent_blk_csum_set(ip, bp->b_data);
635 		error = bwrite(bp);
636 	} else {
637 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
638 		error = ext2_update(ip->i_vnode, 1);
639 	}
640 
641 	return (error);
642 }
643 
644 static int
645 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
646     uint32_t lblk, e4fs_daddr_t blk)
647 {
648 	struct m_ext2fs *fs;
649 	struct ext4_extent_index *idx;
650 	int len;
651 
652 	fs = ip->i_e2fs;
653 
654 	if (lblk == path->ep_index->ei_blk) {
655 		SDT_PROBE2(ext2fs, , trace, extents, 1,
656 		    "lblk == index blk => extent corrupted");
657 		return (EIO);
658 	}
659 
660 	if (path->ep_header->eh_ecount >= path->ep_header->eh_max) {
661 		SDT_PROBE2(ext2fs, , trace, extents, 1,
662 		    "ecount > maxcount => extent corrupted");
663 		return (EIO);
664 	}
665 
666 	if (lblk > path->ep_index->ei_blk) {
667 		/* Insert after. */
668 		idx = path->ep_index + 1;
669 	} else {
670 		/* Insert before. */
671 		idx = path->ep_index;
672 	}
673 
674 	len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
675 	if (len > 0)
676 		memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
677 
678 	if (idx > EXT_MAX_INDEX(path->ep_header)) {
679 		SDT_PROBE2(ext2fs, , trace, extents, 1,
680 		    "index is out of range => extent corrupted");
681 		return (EIO);
682 	}
683 
684 	idx->ei_blk = lblk;
685 	ext4_index_store_pblock(idx, blk);
686 	path->ep_header->eh_ecount++;
687 
688 	return (ext4_ext_dirty(ip, path));
689 }
690 
691 static e4fs_daddr_t
692 ext4_ext_alloc_meta(struct inode *ip)
693 {
694 	e4fs_daddr_t blk = ext2_alloc_meta(ip);
695 	if (blk) {
696 		ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
697 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
698 		ext2_update(ip->i_vnode, 1);
699 	}
700 
701 	return (blk);
702 }
703 
704 static void
705 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
706 {
707 	struct m_ext2fs *fs;
708 	int i, blocksreleased;
709 
710 	fs = ip->i_e2fs;
711 	blocksreleased = count;
712 
713 	for (i = 0; i < count; i++)
714 		ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
715 
716 	if (ip->i_blocks >= blocksreleased)
717 		ip->i_blocks -= btodb(fs->e2fs_bsize) * blocksreleased;
718 	else
719 		ip->i_blocks = 0;
720 
721 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
722 	ext2_update(ip->i_vnode, 1);
723 }
724 
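/*
 * Split the tree at level "at" to make room for newext: allocate new
 * blocks for the levels below "at", move the entries that follow the
 * split point into them, and insert an index for the new subtree into
 * the node at level "at".  Newly allocated blocks are freed on error.
 */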
725 static int
726 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
727     struct ext4_extent *newext, int at)
728 {
729 	struct m_ext2fs *fs;
730 	struct  buf *bp;
731 	int depth = ext4_ext_inode_depth(ip);
732 	struct ext4_extent_header *neh;
733 	struct ext4_extent_index *fidx;
734 	struct ext4_extent *ex;
735 	int i = at, k, m, a;
736 	e4fs_daddr_t newblk, oldblk;
737 	uint32_t border;
738 	e4fs_daddr_t *ablks = NULL;
739 	int error = 0;
740 
741 	fs = ip->i_e2fs;
742 	bp = NULL;
743 
744 	/*
745 	 * We will split at the current extent for now.
746 	 */
747 	if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
748 		SDT_PROBE2(ext2fs, , trace, extents, 1,
749 		    "extent is out of range => extent corrupted");
750 		return (EIO);
751 	}
752 
753 	if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
754 		border = path[depth].ep_ext[1].e_blk;
755 	else
756 		border = newext->e_blk;
757 
758 	/* Allocate new blocks. */
759 	ablks = malloc(sizeof(e4fs_daddr_t) * depth,
760 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
761 	if (!ablks)
762 		return (ENOMEM);
763 	for (a = 0; a < depth - at; a++) {
764 		newblk = ext4_ext_alloc_meta(ip);
		if (newblk == 0) {
			error = ENOSPC;
			goto cleanup;
		}
767 		ablks[a] = newblk;
768 	}
769 
770 	newblk = ablks[--a];
771 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
772 	if (!bp) {
773 		error = EIO;
774 		goto cleanup;
775 	}
776 
777 	neh = ext4_ext_block_header(bp->b_data);
778 	neh->eh_ecount = 0;
779 	neh->eh_max = ext4_ext_space_block(ip);
780 	neh->eh_magic = EXT4_EXT_MAGIC;
781 	neh->eh_depth = 0;
782 	ex = EXT_FIRST_EXTENT(neh);
783 
784 	if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) {
785 		SDT_PROBE2(ext2fs, , trace, extents, 1,
786 		    "extents count out of range => extent corrupted");
787 		error = EIO;
788 		goto cleanup;
789 	}
790 
791 	/* Start copy from next extent. */
792 	m = 0;
793 	path[depth].ep_ext++;
794 	while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
795 		path[depth].ep_ext++;
796 		m++;
797 	}
798 	if (m) {
799 		memmove(ex, path[depth].ep_ext - m,
800 		    sizeof(struct ext4_extent) * m);
801 		neh->eh_ecount = neh->eh_ecount + m;
802 	}
803 
804 	ext2_extent_blk_csum_set(ip, bp->b_data);
805 	bwrite(bp);
806 	bp = NULL;
807 
808 	/* Fix old leaf. */
809 	if (m) {
810 		path[depth].ep_header->eh_ecount =
811 		    path[depth].ep_header->eh_ecount - m;
812 		ext4_ext_dirty(ip, path + depth);
813 	}
814 
815 	/* Create intermediate indexes. */
816 	k = depth - at - 1;
817 	KASSERT(k >= 0, ("ext4_ext_split: negative k"));
818 
819 	/* Insert new index into current index block. */
820 	i = depth - 1;
821 	while (k--) {
822 		oldblk = newblk;
823 		newblk = ablks[--a];
824 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
825 		    (int)fs->e2fs_bsize, NOCRED, &bp);
826 		if (error) {
827 			brelse(bp);
828 			goto cleanup;
829 		}
830 
831 		neh = (struct ext4_extent_header *)bp->b_data;
832 		neh->eh_ecount = 1;
833 		neh->eh_magic = EXT4_EXT_MAGIC;
834 		neh->eh_max = ext4_ext_space_block_index(ip);
835 		neh->eh_depth = depth - i;
836 		fidx = EXT_FIRST_INDEX(neh);
837 		fidx->ei_blk = border;
838 		ext4_index_store_pblock(fidx, oldblk);
839 
840 		m = 0;
841 		path[i].ep_index++;
842 		while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
843 			path[i].ep_index++;
844 			m++;
845 		}
846 		if (m) {
847 			memmove(++fidx, path[i].ep_index - m,
848 			    sizeof(struct ext4_extent_index) * m);
849 			neh->eh_ecount = neh->eh_ecount + m;
850 		}
851 
852 		ext2_extent_blk_csum_set(ip, bp->b_data);
853 		bwrite(bp);
854 		bp = NULL;
855 
856 		/* Fix old index. */
857 		if (m) {
858 			path[i].ep_header->eh_ecount =
859 			    path[i].ep_header->eh_ecount - m;
860 			ext4_ext_dirty(ip, path + i);
861 		}
862 
863 		i--;
864 	}
865 
866 	error = ext4_ext_insert_index(ip, path + at, border, newblk);
867 
868 cleanup:
869 	if (bp)
870 		brelse(bp);
871 
872 	if (error) {
873 		for (i = 0; i < depth; i++) {
874 			if (!ablks[i])
875 				continue;
876 			ext4_ext_blkfree(ip, ablks[i], 1, 0);
877 		}
878 	}
879 
880 	free(ablks, M_EXT2EXTENTS);
881 
882 	return (error);
883 }
884 
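/*
 * Grow the tree by one level: copy the current root into a newly
 * allocated block and turn the root into a single index entry that
 * points at it, then bump the depth stored in the inode.
 */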
885 static int
886 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
887     struct ext4_extent *newext)
888 {
889 	struct m_ext2fs *fs;
890 	struct ext4_extent_path *curpath;
891 	struct ext4_extent_header *neh;
892 	struct buf *bp;
893 	e4fs_daddr_t newblk;
894 	int error = 0;
895 
896 	fs = ip->i_e2fs;
897 	curpath = path;
898 
899 	newblk = ext4_ext_alloc_meta(ip);
900 	if (newblk == 0)
901 		return (ENOSPC);
902 
903 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
904 	if (!bp)
905 		return (EIO);
906 
907 	/* Move top-level index/leaf into new block. */
908 	memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
909 
910 	/* Set size of new block */
911 	neh = ext4_ext_block_header(bp->b_data);
912 	neh->eh_magic = EXT4_EXT_MAGIC;
913 
914 	if (ext4_ext_inode_depth(ip))
915 		neh->eh_max = ext4_ext_space_block_index(ip);
916 	else
917 		neh->eh_max = ext4_ext_space_block(ip);
918 
919 	ext2_extent_blk_csum_set(ip, bp->b_data);
920 	error = bwrite(bp);
921 	if (error)
922 		goto out;
923 
924 	bp = NULL;
925 
926 	curpath->ep_header->eh_magic = EXT4_EXT_MAGIC;
927 	curpath->ep_header->eh_max = ext4_ext_space_root(ip);
928 	curpath->ep_header->eh_ecount = 1;
929 	curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
930 	curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
931 	ext4_index_store_pblock(curpath->ep_index, newblk);
932 
933 	neh = ext4_ext_inode_header(ip);
934 	neh->eh_depth = path->ep_depth + 1;
935 	ext4_ext_dirty(ip, curpath);
936 out:
937 	brelse(bp);
938 
939 	return (error);
940 }
941 
942 static int
943 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
944     struct ext4_extent *newext)
945 {
946 	struct ext4_extent_path *curpath;
947 	int depth, i, error;
948 
949 repeat:
950 	i = depth = ext4_ext_inode_depth(ip);
951 
952 	/* Look for a free index entry in the tree. */
953 	curpath = path + depth;
954 	while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
955 		i--;
956 		curpath--;
957 	}
958 
959 	/*
960 	 * We use an already allocated block for the index block,
961 	 * so subsequent data blocks should be contiguous.
962 	 */
963 	if (EXT_HAS_FREE_INDEX(curpath)) {
964 		error = ext4_ext_split(ip, path, newext, i);
965 		if (error)
966 			goto out;
967 
968 		/* Refill path. */
969 		ext4_ext_drop_refs(path);
970 		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
971 		if (error)
972 			goto out;
973 	} else {
974 		/* Tree is full, do grow in depth. */
975 		error = ext4_ext_grow_indepth(ip, path, newext);
976 		if (error)
977 			goto out;
978 
979 		/* Refill path. */
980 		ext4_ext_drop_refs(path);
981 		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
982 		if (error)
983 			goto out;
984 
985 		/* Check and split tree if required. */
986 		depth = ext4_ext_inode_depth(ip);
987 		if (path[depth].ep_header->eh_ecount ==
988 		    path[depth].ep_header->eh_max)
989 			goto repeat;
990 	}
991 
992 out:
993 	return (error);
994 }
995 
996 static int
997 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
998 {
999 	struct ext4_extent_header *eh;
1000 	struct ext4_extent *ex;
1001 	int32_t border;
1002 	int depth, k;
1003 
1004 	depth = ext4_ext_inode_depth(ip);
1005 	eh = path[depth].ep_header;
1006 	ex = path[depth].ep_ext;
1007 
1008 	if (ex == NULL || eh == NULL)
1009 		return (EIO);
1010 
1011 	if (!depth)
1012 		return (0);
1013 
1014 	/* Correct the tree only if the first leaf was modified. */
1015 	if (ex != EXT_FIRST_EXTENT(eh))
1016 		return (0);
1017 
1018 	k = depth - 1;
1019 	border = path[depth].ep_ext->e_blk;
1020 	path[k].ep_index->ei_blk = border;
1021 	ext4_ext_dirty(ip, path + k);
1022 	while (k--) {
1023 		/* Change all left-side indexes. */
1024 		if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1025 			break;
1026 
1027 		path[k].ep_index->ei_blk = border;
1028 		ext4_ext_dirty(ip, path + k);
1029 	}
1030 
1031 	return (0);
1032 }
1033 
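/*
 * Insert newext into the leaf found in "path".  The new extent is merged
 * with a neighbour when possible; otherwise it is inserted in logical
 * block order, growing the tree with a new leaf or a deeper root when
 * the current leaf is full.  The extent cache is invalidated on exit.
 */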
1034 static int
1035 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1036     struct ext4_extent *newext)
1037 {
1038 	struct ext4_extent_header *eh;
1039 	struct ext4_extent *ex, *nex, *nearex;
1040 	struct ext4_extent_path *npath;
1041 	int depth, len, error, next;
1042 
1043 	depth = ext4_ext_inode_depth(ip);
1044 	ex = path[depth].ep_ext;
1045 	npath = NULL;
1046 
1047 	if (newext->e_len == 0 || path[depth].ep_header == NULL)
1048 		return (EINVAL);
1049 
1050 	/* Insert block into found extent. */
1051 	if (ex && ext4_can_extents_be_merged(ex, newext)) {
1052 		ex->e_len = ex->e_len + newext->e_len;
1053 		eh = path[depth].ep_header;
1054 		nearex = ex;
1055 		goto merge;
1056 	}
1057 
1058 repeat:
1059 	depth = ext4_ext_inode_depth(ip);
1060 	eh = path[depth].ep_header;
1061 	if (eh->eh_ecount < eh->eh_max)
1062 		goto has_space;
1063 
1064 	/* Try next leaf */
1065 	nex = EXT_LAST_EXTENT(eh);
1066 	next = ext4_ext_next_leaf_block(ip, path);
1067 	if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) {
1068 		KASSERT(npath == NULL,
1069 		    ("ext4_ext_insert_extent: bad path"));
1070 
1071 		error = ext4_ext_find_extent(ip, next, &npath);
1072 		if (error)
1073 			goto cleanup;
1074 
1075 		if (npath->ep_depth != path->ep_depth) {
1076 			error = EIO;
1077 			goto cleanup;
1078 		}
1079 
1080 		eh = npath[depth].ep_header;
1081 		if (eh->eh_ecount < eh->eh_max) {
1082 			path = npath;
1083 			goto repeat;
1084 		}
1085 	}
1086 
1087 	/*
1088 	 * There is no free space in the found leaf,
1089 	 * try to add a new leaf to the tree.
1090 	 */
1091 	error = ext4_ext_create_new_leaf(ip, path, newext);
1092 	if (error)
1093 		goto cleanup;
1094 
1095 	depth = ext4_ext_inode_depth(ip);
1096 	eh = path[depth].ep_header;
1097 
1098 has_space:
1099 	nearex = path[depth].ep_ext;
1100 	if (!nearex) {
1101 		/* Create new extent in the leaf. */
1102 		path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1103 	} else if (newext->e_blk > nearex->e_blk) {
1104 		if (nearex != EXT_LAST_EXTENT(eh)) {
1105 			len = EXT_MAX_EXTENT(eh) - nearex;
1106 			len = (len - 1) * sizeof(struct ext4_extent);
1107 			len = len < 0 ? 0 : len;
1108 			memmove(nearex + 2, nearex + 1, len);
1109 		}
1110 		path[depth].ep_ext = nearex + 1;
1111 	} else {
1112 		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1113 		len = len < 0 ? 0 : len;
1114 		memmove(nearex + 1, nearex, len);
1115 		path[depth].ep_ext = nearex;
1116 	}
1117 
1118 	eh->eh_ecount = eh->eh_ecount + 1;
1119 	nearex = path[depth].ep_ext;
1120 	nearex->e_blk = newext->e_blk;
1121 	nearex->e_start_lo = newext->e_start_lo;
1122 	nearex->e_start_hi = newext->e_start_hi;
1123 	nearex->e_len = newext->e_len;
1124 
1125 merge:
1126 	/* Try to merge extents to the right. */
1127 	while (nearex < EXT_LAST_EXTENT(eh)) {
1128 		if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1129 			break;
1130 
1131 		/* Merge with next extent. */
1132 		nearex->e_len = nearex->e_len + nearex[1].e_len;
1133 		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1134 			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1135 			    sizeof(struct ext4_extent);
1136 			memmove(nearex + 1, nearex + 2, len);
1137 		}
1138 
1139 		eh->eh_ecount = eh->eh_ecount - 1;
1140 		KASSERT(eh->eh_ecount != 0,
1141 		    ("ext4_ext_insert_extent: bad ecount"));
1142 	}
1143 
1144 	/*
1145 	 * Try to merge extents to the left,
1146 	 * starting with index correction.
1147 	 */
1148 	error = ext4_ext_correct_indexes(ip, path);
1149 	if (error)
1150 		goto cleanup;
1151 
1152 	ext4_ext_dirty(ip, path + depth);
1153 
1154 cleanup:
1155 	if (npath) {
1156 		ext4_ext_drop_refs(npath);
1157 		free(npath, M_EXT2EXTENTS);
1158 	}
1159 
1160 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1161 	return (error);
1162 }
1163 
1164 static e4fs_daddr_t
1165 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1166     struct ucred *cred, unsigned long *count, int *perror)
1167 {
1168 	struct m_ext2fs *fs;
1169 	e4fs_daddr_t newblk;
1170 
1171 	/*
1172 	 * We will allocate only a single block for now.
1173 	 */
1174 	if (*count > 1)
1175 		return (0);
1176 
1177 	fs = ip->i_e2fs;
1178 	EXT2_LOCK(ip->i_ump);
1179 	*perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1180 	if (*perror)
1181 		return (0);
1182 
1183 	if (newblk) {
1184 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
1185 		ext2_update(ip->i_vnode, 1);
1186 	}
1187 
1188 	return (newblk);
1189 }
1190 
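/*
 * Map the logical block iblk of an extent-based inode to a physical
 * block, allocating and inserting a new extent when none covers it.
 * *pallocated is set when a new block was allocated, *nb receives the
 * physical block number, and *bpp (if not NULL) a buffer for it.
 */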
1191 int
1192 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1193     unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1194     int *pallocated, daddr_t *nb)
1195 {
1196 	struct m_ext2fs *fs;
1197 	struct buf *bp = NULL;
1198 	struct ext4_extent_path *path;
1199 	struct ext4_extent newex, *ex;
1200 	e4fs_daddr_t bpref, newblk = 0;
1201 	unsigned long allocated = 0;
1202 	int error = 0, depth;
1203 
1204 	if (bpp)
1205 		*bpp = NULL;
1206 	*pallocated = 0;
1207 
1208 	/* Check cache. */
1209 	path = NULL;
1210 	if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1211 		if (bpref == EXT4_EXT_CACHE_IN) {
1212 			/* Block is already allocated. */
1213 			newblk = iblk - newex.e_blk +
1214 			    ext4_ext_extent_pblock(&newex);
1215 			allocated = newex.e_len - (iblk - newex.e_blk);
1216 			goto out;
1217 		} else {
1218 			error = EIO;
1219 			goto out2;
1220 		}
1221 	}
1222 
1223 	error = ext4_ext_find_extent(ip, iblk, &path);
1224 	if (error) {
1225 		goto out2;
1226 	}
1227 
1228 	depth = ext4_ext_inode_depth(ip);
1229 	if (path[depth].ep_ext == NULL && depth != 0) {
1230 		error = EIO;
1231 		goto out2;
1232 	}
1233 
1234 	if ((ex = path[depth].ep_ext)) {
1235 		uint64_t lblk = ex->e_blk;
1236 		uint16_t e_len  = ex->e_len;
1237 		e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1238 
1239 		if (e_len > EXT4_MAX_LEN)
1240 			goto out2;
1241 
1242 		/* If the found extent covers the block, simply return it. */
1243 		if (iblk >= lblk && iblk < lblk + e_len) {
1244 			newblk = iblk - lblk + e_start;
1245 			allocated = e_len - (iblk - lblk);
1246 			ext4_ext_put_in_cache(ip, lblk, e_len,
1247 			    e_start, EXT4_EXT_CACHE_IN);
1248 			goto out;
1249 		}
1250 	}
1251 
1252 	/* Allocate the new block. */
1253 	if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1254 		ip->i_next_alloc_goal = 0;
1255 	}
1256 
1257 	bpref = ext4_ext_blkpref(ip, path, iblk);
1258 	allocated = max_blocks;
1259 	newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1260 	if (!newblk)
1261 		goto out2;
1262 
1263 	/* Try to insert new extent into found leaf and return. */
1264 	newex.e_blk = iblk;
1265 	ext4_ext_store_pblock(&newex, newblk);
1266 	newex.e_len = allocated;
1267 	error = ext4_ext_insert_extent(ip, path, &newex);
1268 	if (error)
1269 		goto out2;
1270 
1271 	newblk = ext4_ext_extent_pblock(&newex);
1272 	ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1273 	*pallocated = 1;
1274 
1275 out:
1276 	if (allocated > max_blocks)
1277 		allocated = max_blocks;
1278 
1279 	if (bpp) {
1281 		fs = ip->i_e2fs;
1282 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1283 		    fs->e2fs_bsize, cred, &bp);
1284 		if (error) {
1285 			brelse(bp);
1286 		} else {
1287 			*bpp = bp;
1288 		}
1289 	}
1290 
1291 out2:
1292 	if (path) {
1293 		ext4_ext_drop_refs(path);
1294 		free(path, M_EXT2EXTENTS);
1295 	}
1296 
1297 	if (nb)
1298 		*nb = newblk;
1299 
1300 	return (error);
1301 }
1302 
1303 static inline uint16_t
1304 ext4_ext_get_actual_len(struct ext4_extent *ext)
1305 {
1306 
1307 	return (ext->e_len <= EXT_INIT_MAX_LEN ?
1308 	    ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN));
1309 }
1310 
1311 static inline struct ext4_extent_header *
1312 ext4_ext_header(struct inode *ip)
1313 {
1314 
1315 	return ((struct ext4_extent_header *)ip->i_db);
1316 }
1317 
1318 static int
1319 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1320     unsigned long from, unsigned long to)
1321 {
1322 	unsigned long num, start;
1323 
1324 	if (from >= ex->e_blk &&
1325 	    to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) {
1326 		/* Tail cleanup. */
1327 		num = ex->e_blk + ext4_ext_get_actual_len(ex) - from;
1328 		start = ext4_ext_extent_pblock(ex) +
1329 		    ext4_ext_get_actual_len(ex) - num;
1330 		ext4_ext_blkfree(ip, start, num, 0);
1331 	}
1332 
1333 	return (0);
1334 }
1335 
1336 static int
1337 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1338 {
1339 	e4fs_daddr_t leaf;
1340 
1341 	/* Free index block. */
1342 	path--;
1343 	leaf = ext4_ext_index_pblock(path->ep_index);
1344 	KASSERT(path->ep_header->eh_ecount != 0,
1345 	    ("ext4_ext_rm_index: bad ecount"));
1346 	path->ep_header->eh_ecount--;
1347 	ext4_ext_dirty(ip, path);
1348 	ext4_ext_blkfree(ip, leaf, 1, 0);
1349 	return (0);
1350 }
1351 
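/*
 * Remove from this leaf every extent that lies at or beyond logical
 * block "start", working backwards from the last extent; only whole
 * extents and extent tails can be removed.
 */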
1352 static int
1353 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1354     uint64_t start)
1355 {
1356 	struct ext4_extent_header *eh;
1357 	struct ext4_extent *ex;
1358 	unsigned int a, b, block, num;
1359 	unsigned long ex_blk;
1360 	unsigned short ex_len;
1361 	int depth;
1362 	int error, correct_index;
1363 
1364 	depth = ext4_ext_inode_depth(ip);
1365 	if (!path[depth].ep_header) {
1366 		if (path[depth].ep_data == NULL)
1367 			return (EINVAL);
1368 		path[depth].ep_header =
1369 		    (struct ext4_extent_header *)path[depth].ep_data;
1370 	}
1371 
1372 	eh = path[depth].ep_header;
1373 	if (!eh) {
1374 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1375 		    "bad header => extent corrupted");
1376 		return (EIO);
1377 	}
1378 
1379 	ex = EXT_LAST_EXTENT(eh);
1380 	ex_blk = ex->e_blk;
1381 	ex_len = ext4_ext_get_actual_len(ex);
1382 
1383 	error = 0;
1384 	correct_index = 0;
1385 	while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1386 		path[depth].ep_ext = ex;
1387 		a = ex_blk > start ? ex_blk : start;
1388 		b = (uint64_t)ex_blk + ex_len - 1 <
1389 		    EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1390 
1391 		if (a != ex_blk && b != ex_blk + ex_len - 1)
1392 			return (EINVAL);
1393 		else if (a != ex_blk) {
1394 			/* Remove tail of the extent. */
1395 			block = ex_blk;
1396 			num = a - block;
1397 		} else if (b != ex_blk + ex_len - 1) {
1398 			/* Remove head of the extent, not implemented. */
1399 			return (EINVAL);
1400 		} else {
1401 			/* Remove whole extent. */
1402 			block = ex_blk;
1403 			num = 0;
1404 		}
1405 
1406 		if (ex == EXT_FIRST_EXTENT(eh))
1407 			correct_index = 1;
1408 
1409 		error = ext4_remove_blocks(ip, ex, a, b);
1410 		if (error)
1411 			goto out;
1412 
1413 		if (num == 0) {
1414 			ext4_ext_store_pblock(ex, 0);
1415 			eh->eh_ecount--;
1416 		}
1417 
1418 		ex->e_blk = block;
1419 		ex->e_len = num;
1420 
1421 		ext4_ext_dirty(ip, path + depth);
1422 
1423 		ex--;
1424 		ex_blk = ex->e_blk;
1425 		ex_len = ext4_ext_get_actual_len(ex);
1426 	}
1427 
1428 	if (correct_index && eh->eh_ecount)
1429 		error = ext4_ext_correct_indexes(ip, path);
1430 
1431 	/*
1432 	 * If this leaf became empty, remove it
1433 	 * from the index block above.
1434 	 */
1435 	if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL)
1436 		error = ext4_ext_rm_index(ip, path + depth);
1437 
1438 out:
1439 	return (error);
1440 }
1441 
1442 static struct buf *
1443 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1444     int depth, int flags)
1445 {
1446 	struct m_ext2fs *fs;
1447 	struct ext4_extent_header *eh;
1448 	struct buf *bp;
1449 	int error;
1450 
1451 	fs = ip->i_e2fs;
1452 	error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1453 	    fs->e2fs_bsize, NOCRED, &bp);
1454 	if (error) {
1455 		brelse(bp);
1456 		return (NULL);
1457 	}
1458 
1459 	eh = ext4_ext_block_header(bp->b_data);
1460 	if (eh->eh_depth != depth) {
1461 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1462 		    "unexpected eh_depth");
1463 		goto err;
1464 	}
1465 
1466 	error = ext4_ext_check_header(ip, eh);
1467 	if (error)
1468 		goto err;
1469 
1470 	return (bp);
1471 
1472 err:
1473 	brelse(bp);
1474 	return (NULL);
1475 
1476 }
1477 
1478 static inline int
1479 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1480 {
1481 
1482 	KASSERT(path->ep_index != NULL,
1483 	    ("ext4_ext_more_to_rm: bad index from path"));
1484 
1485 	if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1486 		return (0);
1487 
1488 	if (path->ep_header->eh_ecount == path->index_count)
1489 		return (0);
1490 
1491 	return (1);
1492 }
1493 
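/*
 * Truncate the extent tree so that no extent maps a logical block at or
 * beyond "length".  The tree is walked iteratively, leaves are trimmed
 * with ext4_ext_rm_leaf() and empty index blocks are freed; if the tree
 * ends up empty, its depth is reset to zero.
 */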
1494 int
1495 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1496     struct ucred *cred, struct thread *td)
1497 {
1498 	struct buf *bp;
1499 	struct ext4_extent_header *ehp;
1500 	struct ext4_extent_path *path;
1501 	int depth;
1502 	int i, error;
1503 
1504 	ehp = (struct ext4_extent_header *)ip->i_db;
1505 	depth = ext4_ext_inode_depth(ip);
1506 
1507 	error = ext4_ext_check_header(ip, ehp);
1508 	if (error)
1509 		return (error);
1510 
1511 	path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1512 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1513 	if (!path)
1514 		return (ENOMEM);
1515 
1516 	path[0].ep_header = ehp;
1517 	path[0].ep_depth = depth;
1518 	i = 0;
1519 	while (error == 0 && i >= 0) {
1520 		if (i == depth) {
1521 			/* This is a leaf. */
1522 			error = ext4_ext_rm_leaf(ip, path, length);
1523 			if (error)
1524 				break;
1525 			free(path[i].ep_data, M_EXT2EXTENTS);
1526 			path[i].ep_data = NULL;
1527 			i--;
1528 			continue;
1529 		}
1530 
1531 		/* This is an index. */
1532 		if (!path[i].ep_header)
1533 			path[i].ep_header =
1534 			    (struct ext4_extent_header *)path[i].ep_data;
1535 
1536 		if (!path[i].ep_index) {
1537 			/* This level hasn't been touched yet. */
1538 			path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1539 			path[i].index_count = path[i].ep_header->eh_ecount + 1;
1540 		} else {
1541 			/* We've already been here; look at the next index. */
1542 			path[i].ep_index--;
1543 		}
1544 
1545 		if (ext4_ext_more_to_rm(path + i)) {
1546 			memset(path + i + 1, 0, sizeof(*path));
1547 			bp = ext4_read_extent_tree_block(ip,
1548 			    ext4_ext_index_pblock(path[i].ep_index),
1549 			    path[0].ep_depth - (i + 1), 0);
1550 			if (!bp) {
1551 				error = EIO;
1552 				break;
1553 			}
1554 
1555 			ext4_ext_fill_path_bdata(&path[i+1], bp,
1556 			    ext4_ext_index_pblock(path[i].ep_index));
1557 			brelse(bp);
1558 			path[i].index_count = path[i].ep_header->eh_ecount;
1559 			i++;
1560 		} else {
1561 			if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1562 				/* Index is empty, remove it. */
1563 				error = ext4_ext_rm_index(ip, path + i);
1564 			}
1565 			free(path[i].ep_data, M_EXT2EXTENTS);
1566 			path[i].ep_data = NULL;
1567 			i--;
1568 		}
1569 	}
1570 
1571 	if (path->ep_header->eh_ecount == 0) {
1572 		/*
1573 		 * Truncate the tree to zero.
1574 		 */
1575 		 ext4_ext_header(ip)->eh_depth = 0;
1576 		 ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip);
1577 		 ext4_ext_dirty(ip, path);
1578 	}
1579 
1580 	ext4_ext_drop_refs(path);
1581 	free(path, M_EXT2EXTENTS);
1582 
1583 	return (error);
1584 }
1585