// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
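
/*
 * Terminology used throughout this file: a UFS "block" consists of
 * uspi->s_fpb fragments, the fragment being the smallest unit of
 * allocation.  The device is divided into cylinder groups, each with
 * its own free-fragment bitmap (at ucpi->c_freeoff) and per-group
 * statistics in struct ufs_cylinder_group.
 */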

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments into a whole block: if
	 * every fragment of the block is now free, convert s_fpb free
	 * fragments into one free block in the statistics below.
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that blocks currently mapped to
 * b_blocknr oldb...oldb+count-1 become mapped to newb...newb+count-1.
 * We assume the oldb...oldb+count-1 blocks sit at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of those functions, so it is already locked.
 */
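/*
 * A worked example of the index arithmetic below, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and 1 KiB blocks (i_blkbits == 10): blks_per_page
 * is 1 << (12 - 10) == 4 and mask == 3, so block i lives in page i >> 2
 * at position i & 3, and the outer loop advances with i = (i | mask) + 1
 * to jump straight to the first block of the next page.
 */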
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

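/*
 * Zero freshly allocated fragments so that stale on-disk contents are
 * never exposed to readers; when the inode is IS_SYNC or the caller
 * asks for it, the zeroed buffers are written out immediately.
 */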
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

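/*
 * Allocate 'count' fragments starting at 'fragment', preferably near
 * 'goal'.  Three cases are handled below: a fresh allocation
 * (oldcount == 0), extending the existing run of fragments in place
 * (ufs_add_fragments), and, failing that, allocating a new run and
 * migrating the old data to it (ufs_change_blocknr).
 */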
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space left for the user on the device
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

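/*
 * Tentatively charge 'frags' fragments to the inode, backing the charge
 * out and failing if the resulting i_blocks no longer fits in 32 bits;
 * a minimal guard against i_blocks overflow.
 */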
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = ufs_get_seconds(sb);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
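
/*
 * The macro above jumps to cg_found when cylinder group 'cgno' has
 * either a whole free block (cg_cs.cs_nbfree) or, per the cg_frsum[]
 * size histogram, a free run of at least 'count' (and at most
 * s_fpb - 1) fragments.
 */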

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start two cylinder groups past oldcg: offset 0 was checked
	 * in step 1, offset 1 in step 2.
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = ufs_get_seconds(sb);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

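/*
 * Scan 'size' bytes of the bitmap starting at byte offset 'begin',
 * stepping across the buffer_heads backing the ufs_buffer_head, for the
 * first byte whose table[] entry has a bit of 'mask' set.  As with the
 * classic BSD scanc(), the return value is the number of bytes from the
 * matching byte (inclusive) to the end of the scanned range, so 0 means
 * no match and the caller recovers the match position as
 * 'length - return value' bytes from the start.
 */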
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: block near which we want to find the new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
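	/*
	 * Worked example: for count == 2, mask_arr[2] == 0xf and
	 * want_arr[2] == 0x6 (binary 0110), i.e. the window matches two
	 * adjacent free fragments bounded on both sides by allocated (or
	 * out-of-block) fragments; the blockmap is shifted left by one
	 * below so a run starting at fragment 0 can match as well.
	 */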
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

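/*
 * Maintain the cluster bitmap and the summary of contiguous free block
 * run lengths for a cylinder group: cnt > 0 marks 'blkno' free, cnt < 0
 * marks it allocated, and the summary counts for the old run and the
 * newly formed neighbouring runs are adjusted to match.
 */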
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

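/*
 * ufs_fragtable_8fpb is indexed by a byte of the free-fragment map when
 * there are eight fragments per block: bit (n - 1) of the entry is set
 * when the byte contains a run of exactly n free fragments.  For
 * example, index 0x0f (four adjacent free fragments) yields 0x08, i.e.
 * only bit 3 set.  ufs_fragtable_other serves the same purpose for the
 * other fragments-per-block counts, with a different bit encoding; note
 * the mask passed to ubh_scanc above is shifted by (s_fpb & 7)
 * accordingly.
 */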
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};