xref: /linux/fs/erofs/decompressor_deflate.c (revision db10cb9b)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/zlib.h>
#include "compress.h"

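/*
 * A preallocated DEFLATE decompression context: @z holds the zlib stream
 * state, @bounce is a scratch page used when input and output overlap, and
 * @next links the context into the global free list.
 */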
struct z_erofs_deflate {
	struct z_erofs_deflate *next;
	struct z_stream_s z;
	u8 bounce[PAGE_SIZE];
};

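/*
 * Idle contexts are kept on a singly-linked free list headed by
 * z_erofs_deflate_head and protected by z_erofs_deflate_lock; decompressors
 * sleep on z_erofs_deflate_wq while the list is temporarily empty.
 */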
static DEFINE_SPINLOCK(z_erofs_deflate_lock);
static unsigned int z_erofs_deflate_nstrms, z_erofs_deflate_avail_strms;
static struct z_erofs_deflate *z_erofs_deflate_head;
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);

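/* read-only module parameter: the number of preallocated stream contexts */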
module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);

void z_erofs_deflate_exit(void)
{
	/* there should be no running fs instance */
	while (z_erofs_deflate_avail_strms) {
		struct z_erofs_deflate *strm;

		spin_lock(&z_erofs_deflate_lock);
		strm = z_erofs_deflate_head;
		if (!strm) {
			spin_unlock(&z_erofs_deflate_lock);
			continue;
		}
		z_erofs_deflate_head = NULL;
		spin_unlock(&z_erofs_deflate_lock);

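		/* free the detached contexts without holding the lock */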
		while (strm) {
			struct z_erofs_deflate *n = strm->next;

			vfree(strm->z.workspace);
			kfree(strm);
			--z_erofs_deflate_avail_strms;
			strm = n;
		}
	}
}

int __init z_erofs_deflate_init(void)
{
	/* if not specified, default to the number of possible CPUs */
	if (!z_erofs_deflate_nstrms)
		z_erofs_deflate_nstrms = num_possible_cpus();

	for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
	     ++z_erofs_deflate_avail_strms) {
		struct z_erofs_deflate *strm;

		strm = kzalloc(sizeof(*strm), GFP_KERNEL);
		if (!strm)
			goto out_failed;

		/* XXX: in-kernel zlib cannot shrink windowbits currently */
		strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
		if (!strm->z.workspace) {
			kfree(strm);
			goto out_failed;
		}

		spin_lock(&z_erofs_deflate_lock);
		strm->next = z_erofs_deflate_head;
		z_erofs_deflate_head = strm;
		spin_unlock(&z_erofs_deflate_lock);
	}
	return 0;

out_failed:
	pr_err("failed to allocate zlib workspace\n");
	z_erofs_deflate_exit();
	return -ENOMEM;
}

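/*
 * Validate the DEFLATE configuration read from the on-disk image; reject
 * window sizes larger than the zlib maximum (MAX_WBITS).
 */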
int z_erofs_load_deflate_config(struct super_block *sb,
				struct erofs_super_block *dsb,
				struct z_erofs_deflate_cfgs *dfl, int size)
{
	if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
		erofs_err(sb, "invalid deflate cfgs, size=%u", size);
		return -EINVAL;
	}

	if (dfl->windowbits > MAX_WBITS) {
		erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
		return -EOPNOTSUPP;
	}

	erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
	return 0;
}

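/*
 * Decompress the DEFLATE-compressed data described by @rq into its output
 * pages, using one of the preallocated stream contexts.
 */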
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
			       struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	struct super_block *sb = rq->sb;
	unsigned int insz, outsz, pofs;
	struct z_erofs_deflate *strm;
	u8 *kin, *kout = NULL;
	bool bounced = false;
	int no = -1, ni = 0, j = 0, zerr, err;

	/* 1. get the exact DEFLATE compressed size */
	kin = kmap_local_page(*rq->in);
	err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
			min_t(unsigned int, rq->inputsize,
			      sb->s_blocksize - rq->pageofs_in));
	if (err) {
		kunmap_local(kin);
		return err;
	}

	/* 2. get an available DEFLATE context */
again:
	spin_lock(&z_erofs_deflate_lock);
	strm = z_erofs_deflate_head;
	if (!strm) {
		spin_unlock(&z_erofs_deflate_lock);
		wait_event(z_erofs_deflate_wq, READ_ONCE(z_erofs_deflate_head));
		goto again;
	}
	z_erofs_deflate_head = strm->next;
	spin_unlock(&z_erofs_deflate_lock);

	/* 3. multi-call decompress */
	insz = rq->inputsize;
	outsz = rq->outputsize;
	zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
	if (zerr != Z_OK) {
		err = -EIO;
		goto failed_zinit;
	}

	pofs = rq->pageofs_out;
	strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
	insz -= strm->z.avail_in;
	strm->z.next_in = kin + rq->pageofs_in;
	strm->z.avail_out = 0;

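	/* feed input and output pages to zlib one page at a time */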
	while (1) {
		if (!strm->z.avail_out) {
			if (++no >= nrpages_out || !outsz) {
				erofs_err(sb, "insufficient space for decompressed data");
				err = -EFSCORRUPTED;
				break;
			}

			if (kout)
				kunmap_local(kout);
			strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
			outsz -= strm->z.avail_out;
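			/*
			 * Provide a temporary short-lived page if the caller
			 * left this output slot empty.
			 */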
			if (!rq->out[no]) {
				rq->out[no] = erofs_allocpage(pagepool,
						GFP_KERNEL | __GFP_NOFAIL);
				set_page_private(rq->out[no],
						 Z_EROFS_SHORTLIVED_PAGE);
			}
			kout = kmap_local_page(rq->out[no]);
			strm->z.next_out = kout + pofs;
			pofs = 0;
		}

		if (!strm->z.avail_in && insz) {
			if (++ni >= nrpages_in) {
				erofs_err(sb, "invalid compressed data");
				err = -EFSCORRUPTED;
				break;
			}

			if (kout) { /* unlike kmap(), unmap in reverse order */
				j = strm->z.next_out - kout;
				kunmap_local(kout);
			}
			kunmap_local(kin);
			strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
			insz -= strm->z.avail_in;
			kin = kmap_local_page(rq->in[ni]);
			strm->z.next_in = kin;
			bounced = false;
			if (kout) {
				kout = kmap_local_page(rq->out[no]);
				strm->z.next_out = kout + j;
			}
		}

		/*
		 * Handle overlapping: use the bounce buffer if the compressed
		 * data is still being processed; otherwise, use short-lived
		 * pages from the on-stack pagepool, which are shared within
		 * the same request, so that not _all_ in-place I/O pages need
		 * to be duplicated.
		 */
		if (!bounced && rq->out[no] == rq->in[ni]) {
			memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
			strm->z.next_in = strm->bounce;
			bounced = true;
		}

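		/*
		 * Duplicate in-place input pages further down the queue that
		 * alias the current output page, so that decompression does
		 * not clobber compressed data which has not been consumed yet.
		 */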
		for (j = ni + 1; j < nrpages_in; ++j) {
			struct page *tmppage;

			if (rq->out[no] != rq->in[j])
				continue;

			DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
							rq->in[j]));
			tmppage = erofs_allocpage(pagepool,
						  GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
			copy_highpage(tmppage, rq->in[j]);
			rq->in[j] = tmppage;
		}

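		/*
		 * Decompress the current chunk; stop on zlib errors, on
		 * stream end, or once the requested output has been fully
		 * produced.
		 */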
		zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
		if (zerr != Z_OK || !(outsz + strm->z.avail_out)) {
			if (zerr == Z_OK && rq->partial_decoding)
				break;
			if (zerr == Z_STREAM_END && !outsz)
				break;
			erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
				  zerr, rq->inputsize, rq->outputsize);
			err = -EFSCORRUPTED;
			break;
		}
	}

	if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
		err = -EIO;
	if (kout)
		kunmap_local(kout);
failed_zinit:
	kunmap_local(kin);
	/* 4. push the DEFLATE stream context back to the global list */
	spin_lock(&z_erofs_deflate_lock);
	strm->next = z_erofs_deflate_head;
	z_erofs_deflate_head = strm;
	spin_unlock(&z_erofs_deflate_lock);
	wake_up(&z_erofs_deflate_wq);
	return err;
}