// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/sysv/itree.c
 *
 *  Handling of indirect blocks' trees.
 *  AV, Sep--Dec 2000
 */

#include <linux/buffer_head.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/string.h>
#include "sysv.h"

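/*
 * An on-disk inode holds DIRECT direct block pointers followed by one
 * single-, one double- and one triple-indirect pointer, so the tree
 * hanging off i_data[] is at most DEPTH levels deep: i_data[0..9]
 * point at data blocks, i_data[10] at a single-indirect block,
 * i_data[11] at a double-indirect block and i_data[12] at a
 * triple-indirect block.
 */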
enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */

static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

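/*
 * Split a file block number into per-level offsets along the indirect
 * tree and return the depth of the resulting path (0 for an invalid or
 * out-of-range block).  As a worked example, assuming a 1 KiB block
 * size, i.e. 256 four-byte pointers per indirect block (ptrs_bits == 8):
 * block 7 yields {7} (direct), block 10 yields {10, 0} (first entry of
 * the single-indirect block), and block 300 yields {11, 0, 34}, since
 * 300 - 10 - 256 = 34 falls into the double-indirect range.
 */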
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
{
	struct super_block *sb = inode->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned long	indirect_blocks = sbi->s_ind_per_block,
			double_blocks = sbi->s_ind_per_block_2;
	int n = 0;

	if (block < 0) {
		printk("sysv_block_map: block < 0\n");
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DIRECT) < indirect_blocks) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= indirect_blocks) < double_blocks) {
		offsets[n++] = DIRECT+1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (indirect_blocks - 1);
	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
		offsets[n++] = DIRECT+2;
		offsets[n++] = block >> (ptrs_bits * 2);
		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
		offsets[n++] = block & (indirect_blocks - 1);
	} else {
		/* nothing */;
	}
	return n;
}

static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
{
	return sbi->s_block_base + fs32_to_cpu(sbi, nr);
}

typedef struct {
	sysv_zone_t     *p;
	sysv_zone_t     key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline sysv_zone_t *block_end(struct buffer_head *bh)
{
	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}

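/*
 * Walk the path computed by block_to_path(), filling chain[] with one
 * Indirect (pointer location, cached value, backing buffer_head) per
 * level.  Returns NULL when the full depth was resolved; otherwise it
 * returns the first element that could not be followed, with *err set
 * to 0 (hole), -EAGAIN (a concurrent truncate invalidated the chain)
 * or -EIO (read failure).  The cached key lets callers re-validate the
 * chain under pointers_lock via verify_chain().
 */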
static Indirect *get_branch(struct inode *inode,
			    int depth,
			    int offsets[],
			    Indirect chain[],
			    int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		int block = block_to_cpu(SYSV_SB(sb), p->key);
		bh = sb_bread(sb, block);
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

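/*
 * Allocate the num missing blocks of a branch.  Each freshly allocated
 * indirect block is zeroed and wired to its child before it is sent to
 * disk, so the on-disk tree stays consistent; the head of the new
 * branch is left dangling and only becomes visible via splice_branch().
 * On failure everything allocated so far is released and -ENOSPC is
 * returned.
 */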
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int i;

	branch[0].key = sysv_new_block(inode->i_sb);
	if (branch[0].key) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		int parent;
		/* Allocate the next block */
		branch[n].key = sysv_new_block(inode->i_sb);
		if (!branch[n].key)
			break;
		/*
		 * Get a buffer_head for the parent block, zero it out, set
		 * the pointer to the new block, then send the parent to disk.
		 */
		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			sysv_free_block(inode->i_sb, branch[n].key);
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		dirty_indirect(bh, inode);
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		sysv_free_block(inode->i_sb, branch[i].key);
	return -ENOSPC;
}

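/*
 * Make the branch built by alloc_branch() visible: under pointers_lock,
 * re-verify that the splice point is still valid and vacant, then
 * publish the whole subtree with a single pointer store.  If a racing
 * truncate got there first, the new blocks are freed and -EAGAIN tells
 * the caller to retry the lookup.
 */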
static inline int splice_branch(struct inode *inode,
				Indirect chain[],
				Indirect *where,
				int num)
{
	int i;

	/* Verify that the place we are splicing to is still there and vacant */
	write_lock(&pointers_lock);
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;
	*where->p = where->key;
	write_unlock(&pointers_lock);

	inode_set_ctime_current(inode);

	/* Had we spliced it onto an indirect block? */
	if (where->bh)
		dirty_indirect(where->bh, inode);

	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		sysv_free_block(inode->i_sb, where[i].key);
	return -EAGAIN;
}

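/*
 * Map a file block to a device block for the buffer/page cache.  The
 * fast path resolves the full chain and maps the final key; on a hole
 * with create set, the missing tail of the branch is allocated and
 * spliced in, retrying from scratch whenever a concurrent truncate
 * invalidates what get_branch() has read (-EAGAIN).
 */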
static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	struct super_block *sb = inode->i_sb;
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
					chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

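/*
 * For truncate: find the last block that must survive, i.e. the point
 * where the branch to be freed diverges from the part that stays.  The
 * pointer at the divergence point is cleared under pointers_lock,
 * detaching the doomed subtree in a single store, and its old value is
 * handed back through *top for the caller to free.
 */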
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[],
				Indirect chain[],
				sysv_zone_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired a continuation since we looked at it,
	 * fine: it should all survive and the (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((sysv_zone_t*)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if the rest
	 * of the branch is all ours and does not grow immediately from the
	 * inode, it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
{
	for ( ; p < q ; p++) {
		sysv_zone_t nr = *p;
		if (nr) {
			*p = 0;
			sysv_free_block(inode->i_sb, nr);
			mark_inode_dirty(inode);
		}
	}
}

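/*
 * Recursively free a range of (sub)tree pointers: at depth 0 the range
 * holds data block numbers and is handed to free_data(); above that,
 * each non-zero entry names an indirect block whose contents are freed
 * depth-first before the block itself is released.
 */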
static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
{
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			sysv_zone_t nr = *p;
			if (!nr)
				continue;
			*p = 0;
			block = block_to_cpu(SYSV_SB(sb), nr);
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (sysv_zone_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			sysv_free_block(sb, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

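/*
 * Truncate to i_size: zero the tail of the last partial block, free the
 * direct blocks past the new end, then clear the tails of the indirect
 * blocks on the shared branch and kill any remaining whole subtrees.
 */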
void sysv_truncate(struct inode *inode)
{
	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	sysv_zone_t nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> inode->i_sb->s_blocksize_bits;

	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		free_data(inode, i_data+offsets[0], i_data + DIRECT);
		goto do_indirects;
	}

	partial = find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			dirty_indirect(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		dirty_indirect(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
	while (n < DEPTH) {
		nr = i_data[DIRECT + n - 1];
		if (nr) {
			i_data[DIRECT + n - 1] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, n);
		}
		n++;
	}
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
}

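/*
 * Estimate the total number of blocks (data plus indirect) backing a
 * file of the given size, for stat(2).  Worked example, assuming 1 KiB
 * blocks and 256 pointers per indirect block: a 1 MiB file occupies
 * 1024 data blocks; the first pass adds ((1024 - 10 - 1) >> 8) + 1 = 4
 * single-indirect blocks, the second ((4 - 1 - 1) >> 8) + 1 = 1
 * double-indirect block, giving an estimate of 1029 in total.
 */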
static unsigned sysv_nblocks(struct super_block *s, loff_t size)
{
	struct sysv_sb_info *sbi = SYSV_SB(s);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
	res = blocks;
	while (--i && blocks > direct) {
		blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
		res += blocks;
		direct = 1;
	}
	return res;
}

int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *s = path->dentry->d_sb;
	generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
			 stat);
	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
	stat->blksize = s->s_blocksize;
	return 0;
}

static int sysv_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, get_block);
}

static int sysv_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, get_block);
}

int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, get_block);
}

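/*
 * If a write could not be completed, drop any pagecache past the old
 * end of file and free the blocks that were allocated for it, so a
 * failed extension leaves no stray allocations behind.
 */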
static void sysv_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		sysv_truncate(inode);
	}
}

static int sysv_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, get_block);
	if (unlikely(ret))
		sysv_write_failed(mapping, pos + len);

	return ret;
}

static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_block);
}

const struct address_space_operations sysv_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = sysv_read_folio,
	.writepages = sysv_writepages,
	.write_begin = sysv_write_begin,
	.write_end = generic_write_end,
	.migrate_folio = buffer_migrate_folio,
	.bmap = sysv_bmap
};