1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/file.h>
9 #include <linux/fs.h>
10 #include <linux/pagemap.h>
11 #include <linux/highmem.h>
12 #include <linux/time.h>
13 #include <linux/init.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/writeback.h>
17 #include <linux/slab.h>
18 #include <linux/sched/mm.h>
19 #include <linux/log2.h>
20 #include <crypto/hash.h>
21 #include "misc.h"
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "volumes.h"
27 #include "ordered-data.h"
28 #include "compression.h"
29 #include "extent_io.h"
30 #include "extent_map.h"
31
32 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
33
34 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
35 {
36 switch (type) {
37 case BTRFS_COMPRESS_ZLIB:
38 case BTRFS_COMPRESS_LZO:
39 case BTRFS_COMPRESS_ZSTD:
40 case BTRFS_COMPRESS_NONE:
41 return btrfs_compress_types[type];
42 default:
43 break;
44 }
45
46 return NULL;
47 }
48
49 bool btrfs_compress_is_valid_type(const char *str, size_t len)
50 {
51 int i;
52
53 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
54 size_t comp_len = strlen(btrfs_compress_types[i]);
55
56 if (len < comp_len)
57 continue;
58
59 if (!strncmp(btrfs_compress_types[i], str, comp_len))
60 return true;
61 }
62 return false;
63 }
64
65 static int compression_compress_pages(int type, struct list_head *ws,
66 struct address_space *mapping, u64 start, struct page **pages,
67 unsigned long *out_pages, unsigned long *total_in,
68 unsigned long *total_out)
69 {
70 switch (type) {
71 case BTRFS_COMPRESS_ZLIB:
72 return zlib_compress_pages(ws, mapping, start, pages,
73 out_pages, total_in, total_out);
74 case BTRFS_COMPRESS_LZO:
75 return lzo_compress_pages(ws, mapping, start, pages,
76 out_pages, total_in, total_out);
77 case BTRFS_COMPRESS_ZSTD:
78 return zstd_compress_pages(ws, mapping, start, pages,
79 out_pages, total_in, total_out);
80 case BTRFS_COMPRESS_NONE:
81 default:
82 /*
83 * This can happen when compression races with remount setting
84 * it to 'no compress', while the caller doesn't call
85 * inode_need_compress() to check if we really need to
86 * compress.
87 *
88 * Not a big deal, we just need to inform the caller that we
89 * haven't allocated any pages yet.
90 */
91 *out_pages = 0;
92 return -E2BIG;
93 }
94 }
95
96 static int compression_decompress_bio(int type, struct list_head *ws,
97 struct compressed_bio *cb)
98 {
99 switch (type) {
100 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
101 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
102 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
103 case BTRFS_COMPRESS_NONE:
104 default:
105 /*
106 * This can't happen, the type is validated several times
107 * before we get here.
108 */
109 BUG();
110 }
111 }
112
113 static int compression_decompress(int type, struct list_head *ws,
114 unsigned char *data_in, struct page *dest_page,
115 unsigned long start_byte, size_t srclen, size_t destlen)
116 {
117 switch (type) {
118 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
119 start_byte, srclen, destlen);
120 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
121 start_byte, srclen, destlen);
122 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
123 start_byte, srclen, destlen);
124 case BTRFS_COMPRESS_NONE:
125 default:
126 /*
127 * This can't happen, the type is validated several times
128 * before we get here.
129 */
130 BUG();
131 }
132 }
133
134 static int btrfs_decompress_bio(struct compressed_bio *cb);
135
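/*
 * Size to allocate for a compressed_bio: the struct itself plus room for
 * one checksum per sectorsize block of the on-disk (compressed) data.
 * For example, with 4K sectors and 4-byte crc32c checksums, a 128K
 * compressed extent needs 32 * 4 = 128 extra bytes after the struct.
 */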
136 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
137 unsigned long disk_size)
138 {
139 return sizeof(struct compressed_bio) +
140 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
141 }
142
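/*
 * Verify the checksums of the compressed data in @bio against the values
 * stored in cb->sums, one checksum per sectorsize block. Returns 0 if they
 * match (or if checksums are disabled for this inode), or -EIO on a
 * mismatch, in which case the device corruption counter is also bumped.
 */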
143 static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
144 u64 disk_start)
145 {
146 struct btrfs_fs_info *fs_info = inode->root->fs_info;
147 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
148 const u32 csum_size = fs_info->csum_size;
149 const u32 sectorsize = fs_info->sectorsize;
150 struct page *page;
151 unsigned long i;
152 char *kaddr;
153 u8 csum[BTRFS_CSUM_SIZE];
154 struct compressed_bio *cb = bio->bi_private;
155 u8 *cb_sum = cb->sums;
156
157 if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
158 return 0;
159
160 shash->tfm = fs_info->csum_shash;
161
162 for (i = 0; i < cb->nr_pages; i++) {
163 u32 pg_offset;
164 u32 bytes_left = PAGE_SIZE;
165 page = cb->compressed_pages[i];
166
167 /* Determine the remaining bytes inside the page first */
168 if (i == cb->nr_pages - 1)
169 bytes_left = cb->compressed_len - i * PAGE_SIZE;
170
171 /* Hash through the page sector by sector */
172 for (pg_offset = 0; pg_offset < bytes_left;
173 pg_offset += sectorsize) {
174 kaddr = kmap_atomic(page);
175 crypto_shash_digest(shash, kaddr + pg_offset,
176 sectorsize, csum);
177 kunmap_atomic(kaddr);
178
179 if (memcmp(&csum, cb_sum, csum_size) != 0) {
180 btrfs_print_data_csum_error(inode, disk_start,
181 csum, cb_sum, cb->mirror_num);
182 if (btrfs_io_bio(bio)->device)
183 btrfs_dev_stat_inc_and_print(
184 btrfs_io_bio(bio)->device,
185 BTRFS_DEV_STAT_CORRUPTION_ERRS);
186 return -EIO;
187 }
188 cb_sum += csum_size;
189 disk_start += sectorsize;
190 }
191 }
192 return 0;
193 }
194
195 /* when we finish reading compressed pages from the disk, we
196 * decompress them and then run the bio end_io routines on the
197 * decompressed pages (in the inode address space).
198 *
199 * This allows the checksumming and other IO error handling routines
200 * to work normally
201 *
202 * The compressed pages are freed here, and it must be run
203 * in process context
204 */
205 static void end_compressed_bio_read(struct bio *bio)
206 {
207 struct compressed_bio *cb = bio->bi_private;
208 struct inode *inode;
209 struct page *page;
210 unsigned long index;
211 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
212 int ret = 0;
213
214 if (bio->bi_status)
215 cb->errors = 1;
216
217 /* if there are more bios still pending for this compressed
218 * extent, just exit
219 */
220 if (!refcount_dec_and_test(&cb->pending_bios))
221 goto out;
222
223 /*
224 * Record the correct mirror_num in cb->orig_bio so that
225 * read-repair can work properly.
226 */
227 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
228 cb->mirror_num = mirror;
229
230 /*
231 * Some IO in this cb has failed, just skip the checksum as there
232 * is no way it could be correct.
233 */
234 if (cb->errors == 1)
235 goto csum_failed;
236
237 inode = cb->inode;
238 ret = check_compressed_csum(BTRFS_I(inode), bio,
239 bio->bi_iter.bi_sector << 9);
240 if (ret)
241 goto csum_failed;
242
243 /* ok, we're the last bio for this extent, let's start
244 * the decompression.
245 */
246 ret = btrfs_decompress_bio(cb);
247
248 csum_failed:
249 if (ret)
250 cb->errors = 1;
251
252 /* release the compressed pages */
253 index = 0;
254 for (index = 0; index < cb->nr_pages; index++) {
255 page = cb->compressed_pages[index];
256 page->mapping = NULL;
257 put_page(page);
258 }
259
260 /* do io completion on the original bio */
261 if (cb->errors) {
262 bio_io_error(cb->orig_bio);
263 } else {
264 struct bio_vec *bvec;
265 struct bvec_iter_all iter_all;
266
267 /*
268 * we have verified the checksum already, set page
269 * checked so the end_io handlers know about it
270 */
271 ASSERT(!bio_flagged(bio, BIO_CLONED));
272 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
273 SetPageChecked(bvec->bv_page);
274
275 bio_endio(cb->orig_bio);
276 }
277
278 /* finally free the cb struct */
279 kfree(cb->compressed_pages);
280 kfree(cb);
281 out:
282 bio_put(bio);
283 }
284
285 /*
286 * Clear the writeback bits on all of the file
287 * pages for a compressed write
288 */
289 static noinline void end_compressed_writeback(struct inode *inode,
290 const struct compressed_bio *cb)
291 {
292 unsigned long index = cb->start >> PAGE_SHIFT;
293 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
294 struct page *pages[16];
295 unsigned long nr_pages = end_index - index + 1;
296 int i;
297 int ret;
298
299 if (cb->errors)
300 mapping_set_error(inode->i_mapping, -EIO);
301
302 while (nr_pages > 0) {
303 ret = find_get_pages_contig(inode->i_mapping, index,
304 min_t(unsigned long,
305 nr_pages, ARRAY_SIZE(pages)), pages);
306 if (ret == 0) {
307 nr_pages -= 1;
308 index += 1;
309 continue;
310 }
311 for (i = 0; i < ret; i++) {
312 if (cb->errors)
313 SetPageError(pages[i]);
314 end_page_writeback(pages[i]);
315 put_page(pages[i]);
316 }
317 nr_pages -= ret;
318 index += ret;
319 }
320 /* the inode may be gone now */
321 }
322
323 /*
324 * do the cleanup once all the compressed pages hit the disk.
325 * This will clear writeback on the file pages and free the compressed
326 * pages.
327 *
328 * This also calls the writeback end hooks for the file pages so that
329 * metadata and checksums can be updated in the file.
330 */
331 static void end_compressed_bio_write(struct bio *bio)
332 {
333 struct compressed_bio *cb = bio->bi_private;
334 struct inode *inode;
335 struct page *page;
336 unsigned long index;
337
338 if (bio->bi_status)
339 cb->errors = 1;
340
341 /* if there are more bios still pending for this compressed
342 * extent, just exit
343 */
344 if (!refcount_dec_and_test(&cb->pending_bios))
345 goto out;
346
347 /* ok, we're the last bio for this extent, step one is to
348 * call back into the FS and do all the end_io operations
349 */
350 inode = cb->inode;
351 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
352 btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
353 cb->start, cb->start + cb->len - 1,
354 bio->bi_status == BLK_STS_OK);
355 cb->compressed_pages[0]->mapping = NULL;
356
357 end_compressed_writeback(inode, cb);
358 /* note, our inode could be gone now */
359
360 /*
361 * release the compressed pages, these came from alloc_page and
362 * are not attached to the inode at all
363 */
364 index = 0;
365 for (index = 0; index < cb->nr_pages; index++) {
366 page = cb->compressed_pages[index];
367 page->mapping = NULL;
368 put_page(page);
369 }
370
371 /* finally free the cb struct */
372 kfree(cb->compressed_pages);
373 kfree(cb);
374 out:
375 bio_put(bio);
376 }
377
378 /*
379 * worker function to build and submit bios for previously compressed pages.
380 * The corresponding pages in the inode should be marked for writeback
381 * and the compressed pages should have a reference on them for dropping
382 * when the IO is complete.
383 *
384 * This also checksums the file bytes and gets things ready for
385 * the end io hooks.
386 */
387 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
388 unsigned long len, u64 disk_start,
389 unsigned long compressed_len,
390 struct page **compressed_pages,
391 unsigned long nr_pages,
392 unsigned int write_flags,
393 struct cgroup_subsys_state *blkcg_css)
394 {
395 struct btrfs_fs_info *fs_info = inode->root->fs_info;
396 struct bio *bio = NULL;
397 struct compressed_bio *cb;
398 unsigned long bytes_left;
399 int pg_index = 0;
400 struct page *page;
401 u64 first_byte = disk_start;
402 blk_status_t ret;
403 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
404
405 WARN_ON(!PAGE_ALIGNED(start));
406 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
407 if (!cb)
408 return BLK_STS_RESOURCE;
409 refcount_set(&cb->pending_bios, 0);
410 cb->errors = 0;
411 cb->inode = &inode->vfs_inode;
412 cb->start = start;
413 cb->len = len;
414 cb->mirror_num = 0;
415 cb->compressed_pages = compressed_pages;
416 cb->compressed_len = compressed_len;
417 cb->orig_bio = NULL;
418 cb->nr_pages = nr_pages;
419
420 bio = btrfs_bio_alloc(first_byte);
421 bio->bi_opf = REQ_OP_WRITE | write_flags;
422 bio->bi_private = cb;
423 bio->bi_end_io = end_compressed_bio_write;
424
425 if (blkcg_css) {
426 bio->bi_opf |= REQ_CGROUP_PUNT;
427 kthread_associate_blkcg(blkcg_css);
428 }
429 refcount_set(&cb->pending_bios, 1);
430
431 /* create and submit bios for the compressed pages */
432 bytes_left = compressed_len;
433 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
434 int submit = 0;
435
436 page = compressed_pages[pg_index];
437 page->mapping = inode->vfs_inode.i_mapping;
438 if (bio->bi_iter.bi_size)
439 submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
440 0);
441
442 page->mapping = NULL;
443 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
444 PAGE_SIZE) {
445 /*
446 * inc the count before we submit the bio so
447 * we know the end IO handler won't happen before
448 * we inc the count. Otherwise, the cb might get
449 * freed before we're done setting it up
450 */
451 refcount_inc(&cb->pending_bios);
452 ret = btrfs_bio_wq_end_io(fs_info, bio,
453 BTRFS_WQ_ENDIO_DATA);
454 BUG_ON(ret); /* -ENOMEM */
455
456 if (!skip_sum) {
457 ret = btrfs_csum_one_bio(inode, bio, start, 1);
458 BUG_ON(ret); /* -ENOMEM */
459 }
460
461 ret = btrfs_map_bio(fs_info, bio, 0);
462 if (ret) {
463 bio->bi_status = ret;
464 bio_endio(bio);
465 }
466
467 bio = btrfs_bio_alloc(first_byte);
468 bio->bi_opf = REQ_OP_WRITE | write_flags;
469 bio->bi_private = cb;
470 bio->bi_end_io = end_compressed_bio_write;
471 if (blkcg_css)
472 bio->bi_opf |= REQ_CGROUP_PUNT;
473 bio_add_page(bio, page, PAGE_SIZE, 0);
474 }
475 if (bytes_left < PAGE_SIZE) {
476 btrfs_info(fs_info,
477 "bytes left %lu compress len %lu nr %lu",
478 bytes_left, cb->compressed_len, cb->nr_pages);
479 }
480 bytes_left -= PAGE_SIZE;
481 first_byte += PAGE_SIZE;
482 cond_resched();
483 }
484
485 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
486 BUG_ON(ret); /* -ENOMEM */
487
488 if (!skip_sum) {
489 ret = btrfs_csum_one_bio(inode, bio, start, 1);
490 BUG_ON(ret); /* -ENOMEM */
491 }
492
493 ret = btrfs_map_bio(fs_info, bio, 0);
494 if (ret) {
495 bio->bi_status = ret;
496 bio_endio(bio);
497 }
498
499 if (blkcg_css)
500 kthread_associate_blkcg(NULL);
501
502 return 0;
503 }
504
505 static u64 bio_end_offset(struct bio *bio)
506 {
507 struct bio_vec *last = bio_last_bvec_all(bio);
508
509 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
510 }
511
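/*
 * Try to add pages beyond the requested range to cb->orig_bio, as long as
 * they still map to the same compressed extent on disk, so that a single
 * decompression pass can also fill readahead pages. Pages already in the
 * page cache are skipped (bailing out after a few misses), and we stop at
 * i_size or when the bio can take no more pages.
 */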
512 static noinline int add_ra_bio_pages(struct inode *inode,
513 u64 compressed_end,
514 struct compressed_bio *cb)
515 {
516 unsigned long end_index;
517 unsigned long pg_index;
518 u64 last_offset;
519 u64 isize = i_size_read(inode);
520 int ret;
521 struct page *page;
522 unsigned long nr_pages = 0;
523 struct extent_map *em;
524 struct address_space *mapping = inode->i_mapping;
525 struct extent_map_tree *em_tree;
526 struct extent_io_tree *tree;
527 u64 end;
528 int misses = 0;
529
530 last_offset = bio_end_offset(cb->orig_bio);
531 em_tree = &BTRFS_I(inode)->extent_tree;
532 tree = &BTRFS_I(inode)->io_tree;
533
534 if (isize == 0)
535 return 0;
536
537 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
538
539 while (last_offset < compressed_end) {
540 pg_index = last_offset >> PAGE_SHIFT;
541
542 if (pg_index > end_index)
543 break;
544
545 page = xa_load(&mapping->i_pages, pg_index);
546 if (page && !xa_is_value(page)) {
547 misses++;
548 if (misses > 4)
549 break;
550 goto next;
551 }
552
553 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
554 ~__GFP_FS));
555 if (!page)
556 break;
557
558 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
559 put_page(page);
560 goto next;
561 }
562
563 /*
564 * at this point, we have a locked page in the page cache
565 * for these bytes in the file. But, we have to make
566 * sure they map to this compressed extent on disk.
567 */
568 ret = set_page_extent_mapped(page);
569 if (ret < 0) {
570 unlock_page(page);
571 put_page(page);
572 break;
573 }
574
575 end = last_offset + PAGE_SIZE - 1;
576 lock_extent(tree, last_offset, end);
577 read_lock(&em_tree->lock);
578 em = lookup_extent_mapping(em_tree, last_offset,
579 PAGE_SIZE);
580 read_unlock(&em_tree->lock);
581
582 if (!em || last_offset < em->start ||
583 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
584 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
585 free_extent_map(em);
586 unlock_extent(tree, last_offset, end);
587 unlock_page(page);
588 put_page(page);
589 break;
590 }
591 free_extent_map(em);
592
593 if (page->index == end_index) {
594 size_t zero_offset = offset_in_page(isize);
595
596 if (zero_offset) {
597 int zeros;
598 zeros = PAGE_SIZE - zero_offset;
599 memzero_page(page, zero_offset, zeros);
600 flush_dcache_page(page);
601 }
602 }
603
604 ret = bio_add_page(cb->orig_bio, page,
605 PAGE_SIZE, 0);
606
607 if (ret == PAGE_SIZE) {
608 nr_pages++;
609 put_page(page);
610 } else {
611 unlock_extent(tree, last_offset, end);
612 unlock_page(page);
613 put_page(page);
614 break;
615 }
616 next:
617 last_offset += PAGE_SIZE;
618 }
619 return 0;
620 }
621
622 /*
623 * for a compressed read, the bio we get passed has all the inode pages
624 * in it. We don't actually do IO on those pages but allocate new ones
625 * to hold the compressed pages on disk.
626 *
627 * bio->bi_iter.bi_sector points to the compressed extent on disk
628 * bio->bi_io_vec points to all of the inode pages
629 *
630 * After the compressed pages are read, we copy the bytes into the
631 * bio we were passed and then call the bio end_io calls
632 */
633 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
634 int mirror_num, unsigned long bio_flags)
635 {
636 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
637 struct extent_map_tree *em_tree;
638 struct compressed_bio *cb;
639 unsigned long compressed_len;
640 unsigned long nr_pages;
641 unsigned long pg_index;
642 struct page *page;
643 struct bio *comp_bio;
644 u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
645 u64 em_len;
646 u64 em_start;
647 struct extent_map *em;
648 blk_status_t ret = BLK_STS_RESOURCE;
649 int faili = 0;
650 u8 *sums;
651
652 em_tree = &BTRFS_I(inode)->extent_tree;
653
654 /* we need the actual starting offset of this extent in the file */
655 read_lock(&em_tree->lock);
656 em = lookup_extent_mapping(em_tree,
657 page_offset(bio_first_page_all(bio)),
658 fs_info->sectorsize);
659 read_unlock(&em_tree->lock);
660 if (!em)
661 return BLK_STS_IOERR;
662
663 compressed_len = em->block_len;
664 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
665 if (!cb)
666 goto out;
667
668 refcount_set(&cb->pending_bios, 0);
669 cb->errors = 0;
670 cb->inode = inode;
671 cb->mirror_num = mirror_num;
672 sums = cb->sums;
673
674 cb->start = em->orig_start;
675 em_len = em->len;
676 em_start = em->start;
677
678 free_extent_map(em);
679 em = NULL;
680
681 cb->len = bio->bi_iter.bi_size;
682 cb->compressed_len = compressed_len;
683 cb->compress_type = extent_compress_type(bio_flags);
684 cb->orig_bio = bio;
685
686 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
687 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
688 GFP_NOFS);
689 if (!cb->compressed_pages)
690 goto fail1;
691
692 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
693 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
694 __GFP_HIGHMEM);
695 if (!cb->compressed_pages[pg_index]) {
696 faili = pg_index - 1;
697 ret = BLK_STS_RESOURCE;
698 goto fail2;
699 }
700 }
701 faili = nr_pages - 1;
702 cb->nr_pages = nr_pages;
703
704 add_ra_bio_pages(inode, em_start + em_len, cb);
705
706 /* include any pages we added in add_ra_bio_pages */
707 cb->len = bio->bi_iter.bi_size;
708
709 comp_bio = btrfs_bio_alloc(cur_disk_byte);
710 comp_bio->bi_opf = REQ_OP_READ;
711 comp_bio->bi_private = cb;
712 comp_bio->bi_end_io = end_compressed_bio_read;
713 refcount_set(&cb->pending_bios, 1);
714
715 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
716 u32 pg_len = PAGE_SIZE;
717 int submit = 0;
718
719 /*
720 * To handle subpage case, we need to make sure the bio only
721 * covers the range we need.
722 *
723 * If we're at the last page, truncate the length to only cover
724 * the remaining part.
725 */
726 if (pg_index == nr_pages - 1)
727 pg_len = min_t(u32, PAGE_SIZE,
728 compressed_len - pg_index * PAGE_SIZE);
729
730 page = cb->compressed_pages[pg_index];
731 page->mapping = inode->i_mapping;
732 page->index = em_start >> PAGE_SHIFT;
733
734 if (comp_bio->bi_iter.bi_size)
735 submit = btrfs_bio_fits_in_stripe(page, pg_len,
736 comp_bio, 0);
737
738 page->mapping = NULL;
739 if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
740 unsigned int nr_sectors;
741
742 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
743 BTRFS_WQ_ENDIO_DATA);
744 BUG_ON(ret); /* -ENOMEM */
745
746 /*
747 * inc the count before we submit the bio so
748 * we know the end IO handler won't happen before
749 * we inc the count. Otherwise, the cb might get
750 * freed before we're done setting it up
751 */
752 refcount_inc(&cb->pending_bios);
753
754 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
755 BUG_ON(ret); /* -ENOMEM */
756
757 nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
758 fs_info->sectorsize);
759 sums += fs_info->csum_size * nr_sectors;
760
761 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
762 if (ret) {
763 comp_bio->bi_status = ret;
764 bio_endio(comp_bio);
765 }
766
767 comp_bio = btrfs_bio_alloc(cur_disk_byte);
768 comp_bio->bi_opf = REQ_OP_READ;
769 comp_bio->bi_private = cb;
770 comp_bio->bi_end_io = end_compressed_bio_read;
771
772 bio_add_page(comp_bio, page, pg_len, 0);
773 }
774 cur_disk_byte += pg_len;
775 }
776
777 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
778 BUG_ON(ret); /* -ENOMEM */
779
780 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
781 BUG_ON(ret); /* -ENOMEM */
782
783 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
784 if (ret) {
785 comp_bio->bi_status = ret;
786 bio_endio(comp_bio);
787 }
788
789 return 0;
790
791 fail2:
792 while (faili >= 0) {
793 __free_page(cb->compressed_pages[faili]);
794 faili--;
795 }
796
797 kfree(cb->compressed_pages);
798 fail1:
799 kfree(cb);
800 out:
801 free_extent_map(em);
802 return ret;
803 }
804
805 /*
806 * Heuristic uses systematic sampling to collect data from the input data
807 * range, the logic can be tuned by the following constants:
808 *
809 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
810 * @SAMPLING_INTERVAL - distance between the starts of consecutive samples
811 */
812 #define SAMPLING_READ_SIZE (16)
813 #define SAMPLING_INTERVAL (256)
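/* ie. at most 16 bytes out of every 256 are examined, 1/16 of the range */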
814
815 /*
816 * For statistical analysis of the input data we consider bytes that form a
817 * Galois Field of 256 objects. Each object has an attribute count, ie. how
818 * many times the object appeared in the sample.
819 */
820 #define BUCKET_SIZE (256)
821
822 /*
823 * The size of the sample is based on a statistical sampling rule of thumb.
824 * The common way is to perform sampling tests as long as the number of
825 * elements in each cell is at least 5.
826 *
827 * Instead of 5, we choose 32 to obtain more accurate results.
828 * If the data contain the maximum number of symbols, which is 256, we obtain a
829 * sample size bound by 8192.
830 *
831 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
832 * from up to 512 locations.
833 */
834 #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
835 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
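/* With a 128K (BTRFS_MAX_UNCOMPRESSED) range: 131072 * 16 / 256 = 8192 bytes */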
836
837 struct bucket_item {
838 u32 count;
839 };
840
841 struct heuristic_ws {
842 /* Partial copy of input data */
843 u8 *sample;
844 u32 sample_size;
845 /* Buckets store counters for each byte value */
846 struct bucket_item *bucket;
847 /* Sorting buffer */
848 struct bucket_item *bucket_b;
849 struct list_head list;
850 };
851
852 static struct workspace_manager heuristic_wsm;
853
854 static void free_heuristic_ws(struct list_head *ws)
855 {
856 struct heuristic_ws *workspace;
857
858 workspace = list_entry(ws, struct heuristic_ws, list);
859
860 kvfree(workspace->sample);
861 kfree(workspace->bucket);
862 kfree(workspace->bucket_b);
863 kfree(workspace);
864 }
865
866 static struct list_head *alloc_heuristic_ws(unsigned int level)
867 {
868 struct heuristic_ws *ws;
869
870 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
871 if (!ws)
872 return ERR_PTR(-ENOMEM);
873
874 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
875 if (!ws->sample)
876 goto fail;
877
878 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
879 if (!ws->bucket)
880 goto fail;
881
882 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
883 if (!ws->bucket_b)
884 goto fail;
885
886 INIT_LIST_HEAD(&ws->list);
887 return &ws->list;
888 fail:
889 free_heuristic_ws(&ws->list);
890 return ERR_PTR(-ENOMEM);
891 }
892
893 const struct btrfs_compress_op btrfs_heuristic_compress = {
894 .workspace_manager = &heuristic_wsm,
895 };
896
897 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
898 /* The heuristic is represented as compression type 0 */
899 &btrfs_heuristic_compress,
900 &btrfs_zlib_compress,
901 &btrfs_lzo_compress,
902 &btrfs_zstd_compress,
903 };
904
905 static struct list_head *alloc_workspace(int type, unsigned int level)
906 {
907 switch (type) {
908 case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
909 case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
910 case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
911 case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
912 default:
913 /*
914 * This can't happen, the type is validated several times
915 * before we get here.
916 */
917 BUG();
918 }
919 }
920
921 static void free_workspace(int type, struct list_head *ws)
922 {
923 switch (type) {
924 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
925 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
926 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
927 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
928 default:
929 /*
930 * This can't happen, the type is validated several times
931 * before we get here.
932 */
933 BUG();
934 }
935 }
936
937 static void btrfs_init_workspace_manager(int type)
938 {
939 struct workspace_manager *wsm;
940 struct list_head *workspace;
941
942 wsm = btrfs_compress_op[type]->workspace_manager;
943 INIT_LIST_HEAD(&wsm->idle_ws);
944 spin_lock_init(&wsm->ws_lock);
945 atomic_set(&wsm->total_ws, 0);
946 init_waitqueue_head(&wsm->ws_wait);
947
948 /*
949 * Preallocate one workspace for each compression type so we can
950 * guarantee forward progress in the worst case
951 */
952 workspace = alloc_workspace(type, 0);
953 if (IS_ERR(workspace)) {
954 pr_warn(
955 "BTRFS: cannot preallocate compression workspace, will try later\n");
956 } else {
957 atomic_set(&wsm->total_ws, 1);
958 wsm->free_ws = 1;
959 list_add(workspace, &wsm->idle_ws);
960 }
961 }
962
963 static void btrfs_cleanup_workspace_manager(int type)
964 {
965 struct workspace_manager *wsman;
966 struct list_head *ws;
967
968 wsman = btrfs_compress_op[type]->workspace_manager;
969 while (!list_empty(&wsman->idle_ws)) {
970 ws = wsman->idle_ws.next;
971 list_del(ws);
972 free_workspace(type, ws);
973 atomic_dec(&wsman->total_ws);
974 }
975 }
976
977 /*
978 * This finds an available workspace or allocates a new one.
979 * If it's not possible to allocate a new one, waits until there's one.
980 * Preallocation makes a forward progress guarantee and we do not return
981 * errors.
982 */
983 struct list_head *btrfs_get_workspace(int type, unsigned int level)
984 {
985 struct workspace_manager *wsm;
986 struct list_head *workspace;
987 int cpus = num_online_cpus();
988 unsigned nofs_flag;
989 struct list_head *idle_ws;
990 spinlock_t *ws_lock;
991 atomic_t *total_ws;
992 wait_queue_head_t *ws_wait;
993 int *free_ws;
994
995 wsm = btrfs_compress_op[type]->workspace_manager;
996 idle_ws = &wsm->idle_ws;
997 ws_lock = &wsm->ws_lock;
998 total_ws = &wsm->total_ws;
999 ws_wait = &wsm->ws_wait;
1000 free_ws = &wsm->free_ws;
1001
1002 again:
1003 spin_lock(ws_lock);
1004 if (!list_empty(idle_ws)) {
1005 workspace = idle_ws->next;
1006 list_del(workspace);
1007 (*free_ws)--;
1008 spin_unlock(ws_lock);
1009 return workspace;
1010
1011 }
1012 if (atomic_read(total_ws) > cpus) {
1013 DEFINE_WAIT(wait);
1014
1015 spin_unlock(ws_lock);
1016 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1017 if (atomic_read(total_ws) > cpus && !*free_ws)
1018 schedule();
1019 finish_wait(ws_wait, &wait);
1020 goto again;
1021 }
1022 atomic_inc(total_ws);
1023 spin_unlock(ws_lock);
1024
1025 /*
1026 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1027 * to turn it off here because we might get called from the restricted
1028 * context of btrfs_compress_bio/btrfs_compress_pages
1029 */
1030 nofs_flag = memalloc_nofs_save();
1031 workspace = alloc_workspace(type, level);
1032 memalloc_nofs_restore(nofs_flag);
1033
1034 if (IS_ERR(workspace)) {
1035 atomic_dec(total_ws);
1036 wake_up(ws_wait);
1037
1038 /*
1039 * Do not return the error but go back to waiting. There's a
1040 * workspace preallocated for each type and the compression
1041 * time is bounded so we get to a workspace eventually. This
1042 * makes our caller's life easier.
1043 *
1044 * To prevent silent and low-probability deadlocks (when the
1045 * initial preallocation fails), check if there are any
1046 * workspaces at all.
1047 */
1048 if (atomic_read(total_ws) == 0) {
1049 static DEFINE_RATELIMIT_STATE(_rs,
1050 /* once per minute */ 60 * HZ,
1051 /* no burst */ 1);
1052
1053 if (__ratelimit(&_rs)) {
1054 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1055 }
1056 }
1057 goto again;
1058 }
1059 return workspace;
1060 }
1061
1062 static struct list_head *get_workspace(int type, int level)
1063 {
1064 switch (type) {
1065 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1066 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1067 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
1068 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1069 default:
1070 /*
1071 * This can't happen, the type is validated several times
1072 * before we get here.
1073 */
1074 BUG();
1075 }
1076 }
1077
1078 /*
1079 * put a workspace struct back on the list or free it if we have enough
1080 * idle ones sitting around
1081 */
1082 void btrfs_put_workspace(int type, struct list_head *ws)
1083 {
1084 struct workspace_manager *wsm;
1085 struct list_head *idle_ws;
1086 spinlock_t *ws_lock;
1087 atomic_t *total_ws;
1088 wait_queue_head_t *ws_wait;
1089 int *free_ws;
1090
1091 wsm = btrfs_compress_op[type]->workspace_manager;
1092 idle_ws = &wsm->idle_ws;
1093 ws_lock = &wsm->ws_lock;
1094 total_ws = &wsm->total_ws;
1095 ws_wait = &wsm->ws_wait;
1096 free_ws = &wsm->free_ws;
1097
1098 spin_lock(ws_lock);
1099 if (*free_ws <= num_online_cpus()) {
1100 list_add(ws, idle_ws);
1101 (*free_ws)++;
1102 spin_unlock(ws_lock);
1103 goto wake;
1104 }
1105 spin_unlock(ws_lock);
1106
1107 free_workspace(type, ws);
1108 atomic_dec(total_ws);
1109 wake:
1110 cond_wake_up(ws_wait);
1111 }
1112
1113 static void put_workspace(int type, struct list_head *ws)
1114 {
1115 switch (type) {
1116 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1117 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1118 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1119 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1120 default:
1121 /*
1122 * This can't happen, the type is validated several times
1123 * before we get here.
1124 */
1125 BUG();
1126 }
1127 }
1128
1129 /*
1130 * Adjust @level according to the limits of the compression algorithm or
1131 * fallback to default
1132 */
1133 static unsigned int btrfs_compress_set_level(int type, unsigned level)
1134 {
1135 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1136
1137 if (level == 0)
1138 level = ops->default_level;
1139 else
1140 level = min(level, ops->max_level);
1141
1142 return level;
1143 }
1144
1145 /*
1146 * Given an address space and start and length, compress the bytes into @pages
1147 * that are allocated on demand.
1148 *
1149 * @type_level is encoded algorithm and level, where level 0 means whatever
1150 * default the algorithm chooses and is opaque here;
1151 * - the compression algorithm is stored in bits 0-3
1152 * - the level is stored in bits 4-7
1153 *
1154 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1155 * and returns number of actually allocated pages
1156 *
1157 * @total_in is used to return the number of bytes actually read. It
1158 * may be smaller than the input length if we had to exit early because we
1159 * ran out of room in the pages array or because we crossed the
1160 * max_out threshold.
1161 *
1162 * @total_out is an in/out parameter, must be set to the input length and will
1163 * be also used to return the total number of compressed bytes
1164 *
1165 * @max_out tells us the max number of bytes that we're allowed to
1166 * stuff into pages
1167 */
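/* eg. type_level 0x31 selects zlib (type 1) at compression level 3 */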
1168 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1169 u64 start, struct page **pages,
1170 unsigned long *out_pages,
1171 unsigned long *total_in,
1172 unsigned long *total_out)
1173 {
1174 int type = btrfs_compress_type(type_level);
1175 int level = btrfs_compress_level(type_level);
1176 struct list_head *workspace;
1177 int ret;
1178
1179 level = btrfs_compress_set_level(type, level);
1180 workspace = get_workspace(type, level);
1181 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1182 out_pages, total_in, total_out);
1183 put_workspace(type, workspace);
1184 return ret;
1185 }
1186
1187 /*
1188 * cb->compressed_pages is an array of pages with compressed data.
1189 *
1190 * cb->start is the starting logical offset of this data in the file
1191 *
1192 * cb->orig_bio contains the pages from the file that we want to decompress into
1193 *
1194 * cb->compressed_len is the number of bytes of compressed data
1195 *
1196 * The basic idea is that we have a bio that was created by readpages.
1197 * The pages in the bio are for the uncompressed data, and they may not
1198 * be contiguous. They all correspond to the range of bytes covered by
1199 * the compressed extent.
1200 */
1201 static int btrfs_decompress_bio(struct compressed_bio *cb)
1202 {
1203 struct list_head *workspace;
1204 int ret;
1205 int type = cb->compress_type;
1206
1207 workspace = get_workspace(type, 0);
1208 ret = compression_decompress_bio(type, workspace, cb);
1209 put_workspace(type, workspace);
1210
1211 return ret;
1212 }
1213
1214 /*
1215 * a less complex decompression routine. Our compressed data fits in a
1216 * single page, and we want to read a single page out of it.
1217 * start_byte tells us the offset into the compressed data we're interested in
1218 */
1219 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1220 unsigned long start_byte, size_t srclen, size_t destlen)
1221 {
1222 struct list_head *workspace;
1223 int ret;
1224
1225 workspace = get_workspace(type, 0);
1226 ret = compression_decompress(type, workspace, data_in, dest_page,
1227 start_byte, srclen, destlen);
1228 put_workspace(type, workspace);
1229
1230 return ret;
1231 }
1232
1233 void __init btrfs_init_compress(void)
1234 {
1235 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1236 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1237 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1238 zstd_init_workspace_manager();
1239 }
1240
1241 void __cold btrfs_exit_compress(void)
1242 {
1243 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1244 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1245 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1246 zstd_cleanup_workspace_manager();
1247 }
1248
1249 /*
1250 * Copy uncompressed data from working buffer to pages.
1251 *
1252 * buf_start is the offset in the decompressed data stream at which our
1253 * working buffer starts, and total_out is the offset of the end of the
1254 * buffer (ie. buf_start plus the number of valid bytes in the buffer).
1255 */
1256 int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1257 unsigned long total_out, u64 disk_start,
1258 struct bio *bio)
1259 {
1260 unsigned long buf_offset;
1261 unsigned long current_buf_start;
1262 unsigned long start_byte;
1263 unsigned long prev_start_byte;
1264 unsigned long working_bytes = total_out - buf_start;
1265 unsigned long bytes;
1266 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1267
1268 /*
1269 * start byte is the first byte of the page we're currently
1270 * copying into relative to the start of the compressed data.
1271 */
1272 start_byte = page_offset(bvec.bv_page) - disk_start;
1273
1274 /* we haven't yet hit data corresponding to this page */
1275 if (total_out <= start_byte)
1276 return 1;
1277
1278 /*
1279 * the start of the data we care about is offset into
1280 * the middle of our working buffer
1281 */
1282 if (total_out > start_byte && buf_start < start_byte) {
1283 buf_offset = start_byte - buf_start;
1284 working_bytes -= buf_offset;
1285 } else {
1286 buf_offset = 0;
1287 }
1288 current_buf_start = buf_start;
1289
1290 /* copy bytes from the working buffer into the pages */
1291 while (working_bytes > 0) {
1292 bytes = min_t(unsigned long, bvec.bv_len,
1293 PAGE_SIZE - (buf_offset % PAGE_SIZE));
1294 bytes = min(bytes, working_bytes);
1295
1296 memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1297 bytes);
1298 flush_dcache_page(bvec.bv_page);
1299
1300 buf_offset += bytes;
1301 working_bytes -= bytes;
1302 current_buf_start += bytes;
1303
1304 /* check if we need to pick another page */
1305 bio_advance(bio, bytes);
1306 if (!bio->bi_iter.bi_size)
1307 return 0;
1308 bvec = bio_iter_iovec(bio, bio->bi_iter);
1309 prev_start_byte = start_byte;
1310 start_byte = page_offset(bvec.bv_page) - disk_start;
1311
1312 /*
1313 * We need to make sure we're only adjusting
1314 * our offset into compression working buffer when
1315 * we're switching pages. Otherwise we can incorrectly
1316 * keep copying when we were actually done.
1317 */
1318 if (start_byte != prev_start_byte) {
1319 /*
1320 * make sure our new page is covered by this
1321 * working buffer
1322 */
1323 if (total_out <= start_byte)
1324 return 1;
1325
1326 /*
1327 * the next page in the biovec might not be adjacent
1328 * to the last page, but it might still be found
1329 * inside this working buffer. bump our offset pointer
1330 */
1331 if (total_out > start_byte &&
1332 current_buf_start < start_byte) {
1333 buf_offset = start_byte - buf_start;
1334 working_bytes = total_out - start_byte;
1335 current_buf_start = buf_start + buf_offset;
1336 }
1337 }
1338 }
1339
1340 return 1;
1341 }
1342
1343 /*
1344 * Shannon Entropy calculation
1345 *
1346 * Pure byte distribution analysis fails to determine compressibility of data.
1347 * Try calculating entropy to estimate the average minimum number of bits
1348 * needed to encode the sampled data.
1349 *
1350 * For convenience, return the percentage of needed bits, instead of amount of
1351 * bits directly.
1352 *
1353 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1354 * and can be compressible with high probability
1355 *
1356 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1357 *
1358 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1359 */
1360 #define ENTROPY_LVL_ACEPTABLE (65)
1361 #define ENTROPY_LVL_HIGH (80)
1362
1363 /*
1364 * For increased precision in the shannon_entropy calculation,
1365 * let's do pow(n, M) to keep more digits after the decimal point:
1366 *
1367 * - maximum int bit length is 64
1368 * - ilog2(MAX_SAMPLE_SIZE) -> 13
1369 * - 13 * 4 = 52 < 64 -> M = 4
1370 *
1371 * So use pow(n, 4).
1372 */
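/* eg. ilog2_w(8192) = ilog2(8192^4) = ilog2(2^52) = 52 */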
1373 static inline u32 ilog2_w(u64 n)
1374 {
1375 return ilog2(n * n * n * n);
1376 }
1377
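/*
 * Fixed-point entropy estimate over the byte buckets, scaled by ilog2_w
 * (so entropy_max = 8 * ilog2_w(2) = 32). Returns the percentage of the
 * maximum 8 bits/byte that would be needed to encode the sample.
 */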
1378 static u32 shannon_entropy(struct heuristic_ws *ws)
1379 {
1380 const u32 entropy_max = 8 * ilog2_w(2);
1381 u32 entropy_sum = 0;
1382 u32 p, p_base, sz_base;
1383 u32 i;
1384
1385 sz_base = ilog2_w(ws->sample_size);
1386 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1387 p = ws->bucket[i].count;
1388 p_base = ilog2_w(p);
1389 entropy_sum += p * (sz_base - p_base);
1390 }
1391
1392 entropy_sum /= ws->sample_size;
1393 return entropy_sum * 100 / entropy_max;
1394 }
1395
1396 #define RADIX_BASE 4U
1397 #define COUNTERS_SIZE (1U << RADIX_BASE)
1398
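/*
 * Extract one 4-bit radix digit of @num, with the digit values inverted so
 * that radix_sort() below ends up sorting the buckets in descending order
 * of count, which is what byte_core_set_size() expects.
 */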
1399 static u8 get4bits(u64 num, int shift) {
1400 u8 low4bits;
1401
1402 num >>= shift;
1403 /* Reverse order */
1404 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1405 return low4bits;
1406 }
1407
1408 /*
1409 * Use 4 bits as radix base
1410 * Use 16 u32 counters for calculating new position in buf array
1411 *
1412 * @array - array that will be sorted
1413 * @array_buf - buffer array to store sorting results
1414 * must be equal in size to @array
1415 * @num - array size
1416 */
1417 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1418 int num)
1419 {
1420 u64 max_num;
1421 u64 buf_num;
1422 u32 counters[COUNTERS_SIZE];
1423 u32 new_addr;
1424 u32 addr;
1425 int bitlen;
1426 int shift;
1427 int i;
1428
1429 /*
1430 * Try to avoid useless loop iterations for small numbers stored in big
1431 * counters. Example: 48 33 4 ... in 64bit array
1432 */
1433 max_num = array[0].count;
1434 for (i = 1; i < num; i++) {
1435 buf_num = array[i].count;
1436 if (buf_num > max_num)
1437 max_num = buf_num;
1438 }
1439
1440 buf_num = ilog2(max_num);
1441 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1442
1443 shift = 0;
1444 while (shift < bitlen) {
1445 memset(counters, 0, sizeof(counters));
1446
1447 for (i = 0; i < num; i++) {
1448 buf_num = array[i].count;
1449 addr = get4bits(buf_num, shift);
1450 counters[addr]++;
1451 }
1452
1453 for (i = 1; i < COUNTERS_SIZE; i++)
1454 counters[i] += counters[i - 1];
1455
1456 for (i = num - 1; i >= 0; i--) {
1457 buf_num = array[i].count;
1458 addr = get4bits(buf_num, shift);
1459 counters[addr]--;
1460 new_addr = counters[addr];
1461 array_buf[new_addr] = array[i];
1462 }
1463
1464 shift += RADIX_BASE;
1465
1466 /*
1467 * Normal radix expects to move data from a temporary array, to
1468 * the main one. But that requires some CPU time. Avoid that
1469 * by doing another sort iteration to original array instead of
1470 * memcpy()
1471 */
1472 memset(counters, 0, sizeof(counters));
1473
1474 for (i = 0; i < num; i ++) {
1475 buf_num = array_buf[i].count;
1476 addr = get4bits(buf_num, shift);
1477 counters[addr]++;
1478 }
1479
1480 for (i = 1; i < COUNTERS_SIZE; i++)
1481 counters[i] += counters[i - 1];
1482
1483 for (i = num - 1; i >= 0; i--) {
1484 buf_num = array_buf[i].count;
1485 addr = get4bits(buf_num, shift);
1486 counters[addr]--;
1487 new_addr = counters[addr];
1488 array[new_addr] = array_buf[i];
1489 }
1490
1491 shift += RADIX_BASE;
1492 }
1493 }
1494
1495 /*
1496 * Size of the core byte set - how many bytes cover 90% of the sample
1497 *
1498 * There are several types of structured binary data that use nearly all byte
1499 * values. The distribution can be uniform and counts in all buckets will be
1500 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1501 *
1502 * Other possibility is normal (Gaussian) distribution, where the data could
1503 * be potentially compressible, but we have to take a few more steps to decide
1504 * how much.
1505 *
1506 * @BYTE_CORE_SET_LOW - main part of byte values repeated frequently,
1507 * compression algorithms can easily handle that
1508 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1509 * probability is not compressible
1510 */
1511 #define BYTE_CORE_SET_LOW (64)
1512 #define BYTE_CORE_SET_HIGH (200)
1513
1514 static int byte_core_set_size(struct heuristic_ws *ws)
1515 {
1516 u32 i;
1517 u32 coreset_sum = 0;
1518 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1519 struct bucket_item *bucket = ws->bucket;
1520
1521 /* Sort in reverse order */
1522 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1523
1524 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1525 coreset_sum += bucket[i].count;
1526
1527 if (coreset_sum > core_set_threshold)
1528 return i;
1529
1530 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1531 coreset_sum += bucket[i].count;
1532 if (coreset_sum > core_set_threshold)
1533 break;
1534 }
1535
1536 return i;
1537 }
1538
1539 /*
1540 * Count byte values in buckets.
1541 * This heuristic can detect textual data (configs, xml, json, html, etc).
1542 * Because in most text-like data the byte set is restricted to a limited
1543 * number of possible characters, and that restriction in most cases makes
1544 * the data easy to compress.
1545 *
1546 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1547 * less - compressible
1548 * more - need additional analysis
1549 */
1550 #define BYTE_SET_THRESHOLD (64)
1551
1552 static u32 byte_set_size(const struct heuristic_ws *ws)
1553 {
1554 u32 i;
1555 u32 byte_set_size = 0;
1556
1557 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1558 if (ws->bucket[i].count > 0)
1559 byte_set_size++;
1560 }
1561
1562 /*
1563 * Continue collecting count of byte values in buckets. If the byte
1564 * set size is bigger than the threshold, it's pointless to continue,
1565 * the detection technique would fail for this type of data.
1566 */
1567 for (; i < BUCKET_SIZE; i++) {
1568 if (ws->bucket[i].count > 0) {
1569 byte_set_size++;
1570 if (byte_set_size > BYTE_SET_THRESHOLD)
1571 return byte_set_size;
1572 }
1573 }
1574
1575 return byte_set_size;
1576 }
1577
1578 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1579 {
1580 const u32 half_of_sample = ws->sample_size / 2;
1581 const u8 *data = ws->sample;
1582
1583 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1584 }
1585
1586 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1587 struct heuristic_ws *ws)
1588 {
1589 struct page *page;
1590 u64 index, index_end;
1591 u32 i, curr_sample_pos;
1592 u8 *in_data;
1593
1594 /*
1595 * Compression handles the input data by chunks of 128KiB
1596 * (defined by BTRFS_MAX_UNCOMPRESSED)
1597 *
1598 * We do the same for the heuristic and loop over the whole range.
1599 *
1600 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1601 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1602 */
1603 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1604 end = start + BTRFS_MAX_UNCOMPRESSED;
1605
1606 index = start >> PAGE_SHIFT;
1607 index_end = end >> PAGE_SHIFT;
1608
1609 /* Don't miss unaligned end */
1610 if (!IS_ALIGNED(end, PAGE_SIZE))
1611 index_end++;
1612
1613 curr_sample_pos = 0;
1614 while (index < index_end) {
1615 page = find_get_page(inode->i_mapping, index);
1616 in_data = kmap_local_page(page);
1617 /* Handle case where the start is not aligned to PAGE_SIZE */
1618 i = start % PAGE_SIZE;
1619 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1620 /* Don't sample any garbage from the last page */
1621 if (start > end - SAMPLING_READ_SIZE)
1622 break;
1623 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1624 SAMPLING_READ_SIZE);
1625 i += SAMPLING_INTERVAL;
1626 start += SAMPLING_INTERVAL;
1627 curr_sample_pos += SAMPLING_READ_SIZE;
1628 }
1629 kunmap_local(in_data);
1630 put_page(page);
1631
1632 index++;
1633 }
1634
1635 ws->sample_size = curr_sample_pos;
1636 }
1637
1638 /*
1639 * Compression heuristic.
1640 *
1641 * For now it's a naive and optimistic 'return true', we'll extend the logic to
1642 * quickly (compared to direct compression) detect data characteristics
1643 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1644 * data.
1645 *
1646 * The following types of analysis can be performed:
1647 * - detect mostly zero data
1648 * - detect data with low "byte set" size (text, etc)
1649 * - detect data with low/high "core byte" set
1650 *
1651 * Return non-zero if the compression should be done, 0 otherwise.
1652 */
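/*
 * The non-zero return values encode which test fired: 1 repeated pattern,
 * 2 small byte set, 3 small core byte set, 4 low entropy, 5 medium entropy.
 */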
1653 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1654 {
1655 struct list_head *ws_list = get_workspace(0, 0);
1656 struct heuristic_ws *ws;
1657 u32 i;
1658 u8 byte;
1659 int ret = 0;
1660
1661 ws = list_entry(ws_list, struct heuristic_ws, list);
1662
1663 heuristic_collect_sample(inode, start, end, ws);
1664
1665 if (sample_repeated_patterns(ws)) {
1666 ret = 1;
1667 goto out;
1668 }
1669
1670 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1671
1672 for (i = 0; i < ws->sample_size; i++) {
1673 byte = ws->sample[i];
1674 ws->bucket[byte].count++;
1675 }
1676
1677 i = byte_set_size(ws);
1678 if (i < BYTE_SET_THRESHOLD) {
1679 ret = 2;
1680 goto out;
1681 }
1682
1683 i = byte_core_set_size(ws);
1684 if (i <= BYTE_CORE_SET_LOW) {
1685 ret = 3;
1686 goto out;
1687 }
1688
1689 if (i >= BYTE_CORE_SET_HIGH) {
1690 ret = 0;
1691 goto out;
1692 }
1693
1694 i = shannon_entropy(ws);
1695 if (i <= ENTROPY_LVL_ACEPTABLE) {
1696 ret = 4;
1697 goto out;
1698 }
1699
1700 /*
1701 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1702 * needed to give green light to compression.
1703 *
1704 * For now just assume that compression at that level is not worth the
1705 * resources because:
1706 *
1707 * 1. it is possible to defrag the data later
1708 *
1709 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1710 * values, every bucket has counter at level ~54. The heuristic would
1711 * be confused. This can happen when data have some internal repeated
1712 * patterns like "abbacbbc...". This can be detected by analyzing
1713 * pairs of bytes, which is too costly.
1714 */
1715 if (i < ENTROPY_LVL_HIGH) {
1716 ret = 5;
1717 goto out;
1718 } else {
1719 ret = 0;
1720 goto out;
1721 }
1722
1723 out:
1724 put_workspace(0, ws_list);
1725 return ret;
1726 }
1727
1728 /*
1729 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1730 * level, an unrecognized string will set the default level
1731 */
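/* eg. for the mount option compress=zlib:9, @str is ":9" and this returns 9 */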
1732 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1733 {
1734 unsigned int level = 0;
1735 int ret;
1736
1737 if (!type)
1738 return 0;
1739
1740 if (str[0] == ':') {
1741 ret = kstrtouint(str + 1, 10, &level);
1742 if (ret)
1743 level = 0;
1744 }
1745
1746 level = btrfs_compress_set_level(type, level);
1747
1748 return level;
1749 }
1750