xref: /original-bsd/sys/ufs/lfs/lfs_alloc.c (revision c43e4352)
1 /*	lfs_alloc.c	6.1	83/07/29	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/mount.h"
6 #include "../h/fs.h"
7 #include "../h/conf.h"
8 #include "../h/buf.h"
9 #include "../h/inode.h"
10 #include "../h/dir.h"
11 #include "../h/user.h"
12 #include "../h/quota.h"
13 #include "../h/kernel.h"
14 
15 extern u_long		hashalloc();
16 extern ino_t		ialloccg();
17 extern daddr_t		alloccg();
18 extern daddr_t		alloccgblk();
19 extern daddr_t		fragextend();
20 extern daddr_t		blkpref();
21 extern daddr_t		mapsearch();
22 extern int		inside[], around[];
23 extern unsigned char	*fragtbl[];
24 
25 /*
26  * Allocate a block in the file system.
27  *
28  * The size of the requested block is given, which must be some
29  * multiple of fs_fsize and <= fs_bsize.
30  * A preference may be optionally specified. If a preference is given
31  * the following hierarchy is used to allocate a block:
32  *   1) allocate the requested block.
33  *   2) allocate a rotationally optimal block in the same cylinder.
34  *   3) allocate a block in the same cylinder group.
35  *   4) quadradically rehash into other cylinder groups, until an
36  *      available block is located.
37  * If no block preference is given the following heirarchy is used
38  * to allocate a block:
39  *   1) allocate a block in the cylinder group that contains the
40  *      inode for the file.
41  *   2) quadradically rehash into other cylinder groups, until an
42  *      available block is located.
43  */
44 struct buf *
45 alloc(ip, bpref, size)
46 	register struct inode *ip;
47 	daddr_t bpref;
48 	int size;
49 {
50 	daddr_t bno;
51 	register struct fs *fs;
52 	register struct buf *bp;
53 	int cg;
54 
55 	fs = ip->i_fs;
56 	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
57 		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
58 		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
59 		panic("alloc: bad size");
60 	}
61 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
62 		goto nospace;
63 	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
64 		goto nospace;
65 #ifdef QUOTA
66 	u.u_error = chkdq(ip, (long)btodb(size), 0);
67 	if (u.u_error)
68 		return (NULL);
69 #endif
70 	if (bpref >= fs->fs_size)
71 		bpref = 0;
72 	if (bpref == 0)
73 		cg = itog(fs, ip->i_number);
74 	else
75 		cg = dtog(fs, bpref);
76 	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
77 		(u_long (*)())alloccg);
78 	if (bno <= 0)
79 		goto nospace;
80 	ip->i_blocks += btodb(size);
81 	ip->i_flag |= IUPD|ICHG;
82 	bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
83 	clrbuf(bp);
84 	return (bp);
85 nospace:
86 	fserr(fs, "file system full");
87 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
88 	u.u_error = ENOSPC;
89 	return (NULL);
90 }
91 
92 /*
93  * Reallocate a fragment to a bigger size
94  *
95  * The number and size of the old block is given, and a preference
96  * and new size is also specified. The allocator attempts to extend
97  * the original block. Failing that, the regular block allocator is
98  * invoked to get an appropriate block.
99  */
100 struct buf *
101 realloccg(ip, bprev, bpref, osize, nsize)
102 	register struct inode *ip;
103 	daddr_t bprev, bpref;
104 	int osize, nsize;
105 {
106 	daddr_t bno;
107 	register struct fs *fs;
108 	register struct buf *bp, *obp;
109 	int cg;
110 
111 	fs = ip->i_fs;
112 	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
113 	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
114 		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
115 		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
116 		panic("realloccg: bad size");
117 	}
118 	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
119 		goto nospace;
120 	if (bprev == 0) {
121 		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
122 		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
123 		panic("realloccg: bad bprev");
124 	}
125 #ifdef QUOTA
126 	u.u_error = chkdq(ip, (long)btodb(nsize - osize), 0);
127 	if (u.u_error)
128 		return (NULL);
129 #endif
130 	cg = dtog(fs, bprev);
131 	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
132 	if (bno != 0) {
133 		do {
134 			bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
135 			if (bp->b_flags & B_ERROR) {
136 				brelse(bp);
137 				return (NULL);
138 			}
139 		} while (brealloc(bp, nsize) == 0);
140 		bp->b_flags |= B_DONE;
141 		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
142 		ip->i_blocks += btodb(nsize - osize);
143 		ip->i_flag |= IUPD|ICHG;
144 		return (bp);
145 	}
146 	if (bpref >= fs->fs_size)
147 		bpref = 0;
148 	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize,
149 		(u_long (*)())alloccg);
150 	if (bno > 0) {
151 		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
152 		if (obp->b_flags & B_ERROR) {
153 			brelse(obp);
154 			return (NULL);
155 		}
156 		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
157 		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
158 		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
159 		brelse(obp);
160 		free(ip, bprev, (off_t)osize);
161 		ip->i_blocks += btodb(nsize - osize);
162 		ip->i_flag |= IUPD|ICHG;
163 		return (bp);
164 	}
165 nospace:
166 	/*
167 	 * no space available
168 	 */
169 	fserr(fs, "file system full");
170 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
171 	u.u_error = ENOSPC;
172 	return (NULL);
173 }
174 
175 /*
176  * Allocate an inode in the file system.
177  *
178  * A preference may be optionally specified. If a preference is given
179  * the following hierarchy is used to allocate an inode:
180  *   1) allocate the requested inode.
181  *   2) allocate an inode in the same cylinder group.
182  *   3) quadradically rehash into other cylinder groups, until an
183  *      available inode is located.
184  * If no inode preference is given the following heirarchy is used
185  * to allocate an inode:
186  *   1) allocate an inode in cylinder group 0.
187  *   2) quadradically rehash into other cylinder groups, until an
188  *      available inode is located.
189  */
190 struct inode *
191 ialloc(pip, ipref, mode)
192 	register struct inode *pip;
193 	ino_t ipref;
194 	int mode;
195 {
196 	ino_t ino;
197 	register struct fs *fs;
198 	register struct inode *ip;
199 	int cg;
200 
201 	fs = pip->i_fs;
202 	if (fs->fs_cstotal.cs_nifree == 0)
203 		goto noinodes;
204 #ifdef QUOTA
205 	u.u_error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0);
206 	if (u.u_error)
207 		return (NULL);
208 #endif
209 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
210 		ipref = 0;
211 	cg = itog(fs, ipref);
212 	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
213 	if (ino == 0)
214 		goto noinodes;
215 	ip = iget(pip->i_dev, pip->i_fs, ino);
216 	if (ip == NULL) {
217 		ifree(ip, ino, 0);
218 		return (NULL);
219 	}
220 	if (ip->i_mode) {
221 		printf("mode = 0%o, inum = %d, fs = %s\n",
222 		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
223 		panic("ialloc: dup alloc");
224 	}
225 	if (ip->i_blocks) {				/* XXX */
226 		printf("free inode %s/%d had %d blocks\n",
227 		    fs->fs_fsmnt, ino, ip->i_blocks);
228 		ip->i_blocks = 0;
229 	}
230 	return (ip);
231 noinodes:
232 	fserr(fs, "out of inodes");
233 	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
234 	u.u_error = ENOSPC;
235 	return (NULL);
236 }
237 
238 /*
239  * Find a cylinder to place a directory.
240  *
241  * The policy implemented by this algorithm is to select from
242  * among those cylinder groups with above the average number of
243  * free inodes, the one with the smallest number of directories.
244  */
245 ino_t
246 dirpref(fs)
247 	register struct fs *fs;
248 {
249 	int cg, minndir, mincg, avgifree;
250 
251 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
252 	minndir = fs->fs_ipg;
253 	mincg = 0;
254 	for (cg = 0; cg < fs->fs_ncg; cg++)
255 		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
256 		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
257 			mincg = cg;
258 			minndir = fs->fs_cs(fs, cg).cs_ndir;
259 		}
260 	return ((ino_t)(fs->fs_ipg * mincg));
261 }
262 
263 /*
264  * Select the desired position for the next block in a file.  The file is
265  * logically divided into sections. The first section is composed of the
266  * direct blocks. Each additional section contains fs_maxbpg blocks.
267  *
268  * If no blocks have been allocated in the first section, the policy is to
269  * request a block in the same cylinder group as the inode that describes
270  * the file. If no blocks have been allocated in any other section, the
271  * policy is to place the section in a cylinder group with a greater than
272  * average number of free blocks.  An appropriate cylinder group is found
273  * by maintaining a rotor that sweeps the cylinder groups. When a new
274  * group of blocks is needed, the rotor is advanced until a cylinder group
275  * with greater than the average number of free blocks is found.
276  *
277  * If a section is already partially allocated, the policy is to
278  * contiguously allocate fs_maxcontig blocks.  The end of one of these
279  * contiguous blocks and the beginning of the next is physically separated
280  * so that the disk head will be in transit between them for at least
281  * fs_rotdelay milliseconds.  This is to allow time for the processor to
282  * schedule another I/O transfer.
283  */
284 daddr_t
285 blkpref(ip, lbn, indx, bap)
286 	struct inode *ip;
287 	daddr_t lbn;
288 	int indx;
289 	daddr_t *bap;
290 {
291 	register struct fs *fs;
292 	int cg, avgbfree;
293 	daddr_t nextblk;
294 
295 	fs = ip->i_fs;
296 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
297 		if (lbn < NDADDR) {
298 			cg = itog(fs, ip->i_number);
299 			return (fs->fs_fpg * cg + fs->fs_frag);
300 		}
301 		/*
302 		 * Find a cylinder with greater than average number of
303 		 * unused data blocks.
304 		 */
305 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
306 		for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
307 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
308 				fs->fs_cgrotor = cg;
309 				return (fs->fs_fpg * cg + fs->fs_frag);
310 			}
311 		for (cg = 0; cg <= fs->fs_cgrotor; cg++)
312 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
313 				fs->fs_cgrotor = cg;
314 				return (fs->fs_fpg * cg + fs->fs_frag);
315 			}
316 		return (NULL);
317 	}
318 	/*
319 	 * One or more previous blocks have been laid out. If less
320 	 * than fs_maxcontig previous blocks are contiguous, the
321 	 * next block is requested contiguously, otherwise it is
322 	 * requested rotationally delayed by fs_rotdelay milliseconds.
323 	 */
324 	nextblk = bap[indx - 1] + fs->fs_frag;
325 	if (indx > fs->fs_maxcontig &&
326 	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
327 	    != nextblk)
328 		return (nextblk);
329 	if (fs->fs_rotdelay != 0)
330 		/*
331 		 * Here we convert ms of delay to frags as:
332 		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
333 		 *	((sect/frag) * (ms/sec))
334 		 * then round up to the next block.
335 		 */
336 		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
337 		    (NSPF(fs) * 1000), fs->fs_frag);
338 	return (nextblk);
339 }
340 
341 /*
342  * Implement the cylinder overflow algorithm.
343  *
344  * The policy implemented by this algorithm is:
345  *   1) allocate the block in its requested cylinder group.
346  *   2) quadradically rehash on the cylinder group number.
347  *   3) brute force search for a free block.
348  */
349 /*VARARGS5*/
350 u_long
351 hashalloc(ip, cg, pref, size, allocator)
352 	struct inode *ip;
353 	int cg;
354 	long pref;
355 	int size;	/* size for data blocks, mode for inodes */
356 	u_long (*allocator)();
357 {
358 	register struct fs *fs;
359 	long result;
360 	int i, icg = cg;
361 
362 	fs = ip->i_fs;
363 	/*
364 	 * 1: preferred cylinder group
365 	 */
366 	result = (*allocator)(ip, cg, pref, size);
367 	if (result)
368 		return (result);
369 	/*
370 	 * 2: quadratic rehash
371 	 */
372 	for (i = 1; i < fs->fs_ncg; i *= 2) {
373 		cg += i;
374 		if (cg >= fs->fs_ncg)
375 			cg -= fs->fs_ncg;
376 		result = (*allocator)(ip, cg, 0, size);
377 		if (result)
378 			return (result);
379 	}
380 	/*
381 	 * 3: brute force search
382 	 * Note that we start at i == 2, since 0 was checked initially,
383 	 * and 1 is always checked in the quadratic rehash.
384 	 */
385 	cg = (icg + 2) % fs->fs_ncg;
386 	for (i = 2; i < fs->fs_ncg; i++) {
387 		result = (*allocator)(ip, cg, 0, size);
388 		if (result)
389 			return (result);
390 		cg++;
391 		if (cg == fs->fs_ncg)
392 			cg = 0;
393 	}
394 	return (NULL);
395 }
396 
397 /*
398  * Determine whether a fragment can be extended.
399  *
400  * Check to see if the necessary fragments are available, and
401  * if they are, allocate them.
402  */
403 daddr_t
404 fragextend(ip, cg, bprev, osize, nsize)
405 	struct inode *ip;
406 	int cg;
407 	long bprev;
408 	int osize, nsize;
409 {
410 	register struct fs *fs;
411 	register struct buf *bp;
412 	register struct cg *cgp;
413 	long bno;
414 	int frags, bbase;
415 	int i;
416 
417 	fs = ip->i_fs;
418 	if (fs->fs_cs(fs, cg).cs_nffree < nsize - osize)
419 		return (NULL);
420 	frags = numfrags(fs, nsize);
421 	bbase = fragoff(fs, bprev);
422 	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
423 		/* cannot extend across a block boundry */
424 		return (NULL);
425 	}
426 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
427 	cgp = bp->b_un.b_cg;
428 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
429 		brelse(bp);
430 		return (NULL);
431 	}
432 	cgp->cg_time = time.tv_sec;
433 	bno = dtogd(fs, bprev);
434 	for (i = numfrags(fs, osize); i < frags; i++)
435 		if (isclr(cgp->cg_free, bno + i)) {
436 			brelse(bp);
437 			return (NULL);
438 		}
439 	/*
440 	 * the current fragment can be extended
441 	 * deduct the count on fragment being extended into
442 	 * increase the count on the remaining fragment (if any)
443 	 * allocate the extended piece
444 	 */
445 	for (i = frags; i < fs->fs_frag - bbase; i++)
446 		if (isclr(cgp->cg_free, bno + i))
447 			break;
448 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
449 	if (i != frags)
450 		cgp->cg_frsum[i - frags]++;
451 	for (i = numfrags(fs, osize); i < frags; i++) {
452 		clrbit(cgp->cg_free, bno + i);
453 		cgp->cg_cs.cs_nffree--;
454 		fs->fs_cstotal.cs_nffree--;
455 		fs->fs_cs(fs, cg).cs_nffree--;
456 	}
457 	fs->fs_fmod++;
458 	bdwrite(bp);
459 	return (bprev);
460 }
461 
462 /*
463  * Determine whether a block can be allocated.
464  *
465  * Check to see if a block of the apprpriate size is available,
466  * and if it is, allocate it.
467  */
468 daddr_t
469 alloccg(ip, cg, bpref, size)
470 	struct inode *ip;
471 	int cg;
472 	daddr_t bpref;
473 	int size;
474 {
475 	register struct fs *fs;
476 	register struct buf *bp;
477 	register struct cg *cgp;
478 	int bno, frags;
479 	int allocsiz;
480 	register int i;
481 
482 	fs = ip->i_fs;
483 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
484 		return (NULL);
485 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
486 	cgp = bp->b_un.b_cg;
487 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
488 		brelse(bp);
489 		return (NULL);
490 	}
491 	if (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)
492 		return (NULL);
493 	cgp->cg_time = time.tv_sec;
494 	if (size == fs->fs_bsize) {
495 		bno = alloccgblk(fs, cgp, bpref);
496 		bdwrite(bp);
497 		return (bno);
498 	}
499 	/*
500 	 * check to see if any fragments are already available
501 	 * allocsiz is the size which will be allocated, hacking
502 	 * it down to a smaller size if necessary
503 	 */
504 	frags = numfrags(fs, size);
505 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
506 		if (cgp->cg_frsum[allocsiz] != 0)
507 			break;
508 	if (allocsiz == fs->fs_frag) {
509 		/*
510 		 * no fragments were available, so a block will be
511 		 * allocated, and hacked up
512 		 */
513 		if (cgp->cg_cs.cs_nbfree == 0) {
514 			brelse(bp);
515 			return (NULL);
516 		}
517 		bno = alloccgblk(fs, cgp, bpref);
518 		bpref = dtogd(fs, bno);
519 		for (i = frags; i < fs->fs_frag; i++)
520 			setbit(cgp->cg_free, bpref + i);
521 		i = fs->fs_frag - frags;
522 		cgp->cg_cs.cs_nffree += i;
523 		fs->fs_cstotal.cs_nffree += i;
524 		fs->fs_cs(fs, cg).cs_nffree += i;
525 		fs->fs_fmod++;
526 		cgp->cg_frsum[i]++;
527 		bdwrite(bp);
528 		return (bno);
529 	}
530 	bno = mapsearch(fs, cgp, bpref, allocsiz);
531 	if (bno < 0)
532 		return (NULL);
533 	for (i = 0; i < frags; i++)
534 		clrbit(cgp->cg_free, bno + i);
535 	cgp->cg_cs.cs_nffree -= frags;
536 	fs->fs_cstotal.cs_nffree -= frags;
537 	fs->fs_cs(fs, cg).cs_nffree -= frags;
538 	fs->fs_fmod++;
539 	cgp->cg_frsum[allocsiz]--;
540 	if (frags != allocsiz)
541 		cgp->cg_frsum[allocsiz - frags]++;
542 	bdwrite(bp);
543 	return (cg * fs->fs_fpg + bno);
544 }
545 
546 /*
547  * Allocate a block in a cylinder group.
548  *
549  * This algorithm implements the following policy:
550  *   1) allocate the requested block.
551  *   2) allocate a rotationally optimal block in the same cylinder.
552  *   3) allocate the next available block on the block rotor for the
553  *      specified cylinder group.
554  * Note that this routine only allocates fs_bsize blocks; these
555  * blocks may be fragmented by the routine that allocates them.
556  */
557 daddr_t
558 alloccgblk(fs, cgp, bpref)
559 	register struct fs *fs;
560 	register struct cg *cgp;
561 	daddr_t bpref;
562 {
563 	daddr_t bno;
564 	int cylno, pos, delta;
565 	short *cylbp;
566 	register int i;
567 
568 	if (bpref == 0) {
569 		bpref = cgp->cg_rotor;
570 		goto norot;
571 	}
572 	bpref &= ~(fs->fs_frag - 1);
573 	bpref = dtogd(fs, bpref);
574 	/*
575 	 * if the requested block is available, use it
576 	 */
577 	if (isblock(fs, cgp->cg_free, fragstoblks(fs, bpref))) {
578 		bno = bpref;
579 		goto gotit;
580 	}
581 	/*
582 	 * check for a block available on the same cylinder
583 	 */
584 	cylno = cbtocylno(fs, bpref);
585 	if (cgp->cg_btot[cylno] == 0)
586 		goto norot;
587 	if (fs->fs_cpc == 0) {
588 		/*
589 		 * block layout info is not available, so just have
590 		 * to take any block in this cylinder.
591 		 */
592 		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
593 		goto norot;
594 	}
595 	/*
596 	 * check the summary information to see if a block is
597 	 * available in the requested cylinder starting at the
598 	 * requested rotational position and proceeding around.
599 	 */
600 	cylbp = cgp->cg_b[cylno];
601 	pos = cbtorpos(fs, bpref);
602 	for (i = pos; i < NRPOS; i++)
603 		if (cylbp[i] > 0)
604 			break;
605 	if (i == NRPOS)
606 		for (i = 0; i < pos; i++)
607 			if (cylbp[i] > 0)
608 				break;
609 	if (cylbp[i] > 0) {
610 		/*
611 		 * found a rotational position, now find the actual
612 		 * block. A panic if none is actually there.
613 		 */
614 		pos = cylno % fs->fs_cpc;
615 		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
616 		if (fs->fs_postbl[pos][i] == -1) {
617 			printf("pos = %d, i = %d, fs = %s\n",
618 			    pos, i, fs->fs_fsmnt);
619 			panic("alloccgblk: cyl groups corrupted");
620 		}
621 		for (i = fs->fs_postbl[pos][i];; ) {
622 			if (isblock(fs, cgp->cg_free, bno + i)) {
623 				bno = blkstofrags(fs, (bno + i));
624 				goto gotit;
625 			}
626 			delta = fs->fs_rotbl[i];
627 			if (delta <= 0 || delta > MAXBPC - i)
628 				break;
629 			i += delta;
630 		}
631 		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
632 		panic("alloccgblk: can't find blk in cyl");
633 	}
634 norot:
635 	/*
636 	 * no blocks in the requested cylinder, so take next
637 	 * available one in this cylinder group.
638 	 */
639 	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
640 	if (bno < 0)
641 		return (NULL);
642 	cgp->cg_rotor = bno;
643 gotit:
644 	clrblock(fs, cgp->cg_free, (long)fragstoblks(fs, bno));
645 	cgp->cg_cs.cs_nbfree--;
646 	fs->fs_cstotal.cs_nbfree--;
647 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
648 	cylno = cbtocylno(fs, bno);
649 	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
650 	cgp->cg_btot[cylno]--;
651 	fs->fs_fmod++;
652 	return (cgp->cg_cgx * fs->fs_fpg + bno);
653 }
654 
655 /*
656  * Determine whether an inode can be allocated.
657  *
658  * Check to see if an inode is available, and if it is,
659  * allocate it using the following policy:
660  *   1) allocate the requested inode.
661  *   2) allocate the next available inode after the requested
662  *      inode in the specified cylinder group.
663  */
664 ino_t
665 ialloccg(ip, cg, ipref, mode)
666 	struct inode *ip;
667 	int cg;
668 	daddr_t ipref;
669 	int mode;
670 {
671 	register struct fs *fs;
672 	register struct buf *bp;
673 	register struct cg *cgp;
674 	int i;
675 
676 	fs = ip->i_fs;
677 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
678 		return (NULL);
679 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
680 	cgp = bp->b_un.b_cg;
681 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
682 		brelse(bp);
683 		return (NULL);
684 	}
685 	if (cgp->cg_cs.cs_nifree == 0)
686 		return (NULL);
687 	cgp->cg_time = time.tv_sec;
688 	if (ipref) {
689 		ipref %= fs->fs_ipg;
690 		if (isclr(cgp->cg_iused, ipref))
691 			goto gotit;
692 	} else
693 		ipref = cgp->cg_irotor;
694 	for (i = 0; i < fs->fs_ipg; i++) {
695 		ipref++;
696 		if (ipref >= fs->fs_ipg)
697 			ipref = 0;
698 		if (isclr(cgp->cg_iused, ipref)) {
699 			cgp->cg_irotor = ipref;
700 			goto gotit;
701 		}
702 	}
703 	brelse(bp);
704 	return (NULL);
705 gotit:
706 	setbit(cgp->cg_iused, ipref);
707 	cgp->cg_cs.cs_nifree--;
708 	fs->fs_cstotal.cs_nifree--;
709 	fs->fs_cs(fs, cg).cs_nifree--;
710 	fs->fs_fmod++;
711 	if ((mode & IFMT) == IFDIR) {
712 		cgp->cg_cs.cs_ndir++;
713 		fs->fs_cstotal.cs_ndir++;
714 		fs->fs_cs(fs, cg).cs_ndir++;
715 	}
716 	bdwrite(bp);
717 	return (cg * fs->fs_ipg + ipref);
718 }
719 
720 /*
721  * Free a block or fragment.
722  *
723  * The specified block or fragment is placed back in the
724  * free map. If a fragment is deallocated, a possible
725  * block reassembly is checked.
726  */
727 free(ip, bno, size)
728 	register struct inode *ip;
729 	daddr_t bno;
730 	off_t size;
731 {
732 	register struct fs *fs;
733 	register struct cg *cgp;
734 	register struct buf *bp;
735 	int cg, blk, frags, bbase;
736 	register int i;
737 
738 	fs = ip->i_fs;
739 	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
740 		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
741 		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
742 		panic("free: bad size");
743 	}
744 	cg = dtog(fs, bno);
745 	if (badblock(fs, bno)) {
746 		printf("bad block %d, ino %d\n", bno, ip->i_number);
747 		return;
748 	}
749 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
750 	cgp = bp->b_un.b_cg;
751 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
752 		brelse(bp);
753 		return;
754 	}
755 	cgp->cg_time = time.tv_sec;
756 	bno = dtogd(fs, bno);
757 	if (size == fs->fs_bsize) {
758 		if (isblock(fs, cgp->cg_free, fragstoblks(fs, bno))) {
759 			printf("dev = 0x%x, block = %d, fs = %s\n",
760 			    ip->i_dev, bno, fs->fs_fsmnt);
761 			panic("free: freeing free block");
762 		}
763 		setblock(fs, cgp->cg_free, fragstoblks(fs, bno));
764 		cgp->cg_cs.cs_nbfree++;
765 		fs->fs_cstotal.cs_nbfree++;
766 		fs->fs_cs(fs, cg).cs_nbfree++;
767 		i = cbtocylno(fs, bno);
768 		cgp->cg_b[i][cbtorpos(fs, bno)]++;
769 		cgp->cg_btot[i]++;
770 	} else {
771 		bbase = bno - (bno % fs->fs_frag);
772 		/*
773 		 * decrement the counts associated with the old frags
774 		 */
775 		blk = blkmap(fs, cgp->cg_free, bbase);
776 		fragacct(fs, blk, cgp->cg_frsum, -1);
777 		/*
778 		 * deallocate the fragment
779 		 */
780 		frags = numfrags(fs, size);
781 		for (i = 0; i < frags; i++) {
782 			if (isset(cgp->cg_free, bno + i)) {
783 				printf("dev = 0x%x, block = %d, fs = %s\n",
784 				    ip->i_dev, bno + i, fs->fs_fsmnt);
785 				panic("free: freeing free frag");
786 			}
787 			setbit(cgp->cg_free, bno + i);
788 		}
789 		cgp->cg_cs.cs_nffree += i;
790 		fs->fs_cstotal.cs_nffree += i;
791 		fs->fs_cs(fs, cg).cs_nffree += i;
792 		/*
793 		 * add back in counts associated with the new frags
794 		 */
795 		blk = blkmap(fs, cgp->cg_free, bbase);
796 		fragacct(fs, blk, cgp->cg_frsum, 1);
797 		/*
798 		 * if a complete block has been reassembled, account for it
799 		 */
800 		if (isblock(fs, cgp->cg_free, fragstoblks(fs, bbase))) {
801 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
802 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
803 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
804 			cgp->cg_cs.cs_nbfree++;
805 			fs->fs_cstotal.cs_nbfree++;
806 			fs->fs_cs(fs, cg).cs_nbfree++;
807 			i = cbtocylno(fs, bbase);
808 			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
809 			cgp->cg_btot[i]++;
810 		}
811 	}
812 	fs->fs_fmod++;
813 	bdwrite(bp);
814 }
815 
816 /*
817  * Free an inode.
818  *
819  * The specified inode is placed back in the free map.
820  */
821 ifree(ip, ino, mode)
822 	struct inode *ip;
823 	ino_t ino;
824 	int mode;
825 {
826 	register struct fs *fs;
827 	register struct cg *cgp;
828 	register struct buf *bp;
829 	int cg;
830 
831 	fs = ip->i_fs;
832 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
833 		printf("dev = 0x%x, ino = %d, fs = %s\n",
834 		    ip->i_dev, ino, fs->fs_fsmnt);
835 		panic("ifree: range");
836 	}
837 	cg = itog(fs, ino);
838 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
839 	cgp = bp->b_un.b_cg;
840 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
841 		brelse(bp);
842 		return;
843 	}
844 	cgp->cg_time = time.tv_sec;
845 	ino %= fs->fs_ipg;
846 	if (isclr(cgp->cg_iused, ino)) {
847 		printf("dev = 0x%x, ino = %d, fs = %s\n",
848 		    ip->i_dev, ino, fs->fs_fsmnt);
849 		panic("ifree: freeing free inode");
850 	}
851 	clrbit(cgp->cg_iused, ino);
852 	cgp->cg_cs.cs_nifree++;
853 	fs->fs_cstotal.cs_nifree++;
854 	fs->fs_cs(fs, cg).cs_nifree++;
855 	if ((mode & IFMT) == IFDIR) {
856 		cgp->cg_cs.cs_ndir--;
857 		fs->fs_cstotal.cs_ndir--;
858 		fs->fs_cs(fs, cg).cs_ndir--;
859 	}
860 	fs->fs_fmod++;
861 	bdwrite(bp);
862 }
863 
864 /*
865  * Find a block of the specified size in the specified cylinder group.
866  *
867  * It is a panic if a request is made to find a block if none are
868  * available.
869  */
870 daddr_t
871 mapsearch(fs, cgp, bpref, allocsiz)
872 	register struct fs *fs;
873 	register struct cg *cgp;
874 	daddr_t bpref;
875 	int allocsiz;
876 {
877 	daddr_t bno;
878 	int start, len, loc, i;
879 	int blk, field, subfield, pos;
880 
881 	/*
882 	 * find the fragment by searching through the free block
883 	 * map for an appropriate bit pattern
884 	 */
885 	if (bpref)
886 		start = dtogd(fs, bpref) / NBBY;
887 	else
888 		start = cgp->cg_frotor / NBBY;
889 	len = howmany(fs->fs_fpg, NBBY) - start;
890 	loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[start],
891 		(caddr_t)fragtbl[fs->fs_frag],
892 		(int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
893 	if (loc == 0) {
894 		len = start + 1;
895 		start = 0;
896 		loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[start],
897 			(caddr_t)fragtbl[fs->fs_frag],
898 			(int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
899 		if (loc == 0)
900 			return (-1);
901 	}
902 	bno = (start + len - loc) * NBBY;
903 	cgp->cg_frotor = bno;
904 	/*
905 	 * found the byte in the map
906 	 * sift through the bits to find the selected frag
907 	 */
908 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
909 		blk = blkmap(fs, cgp->cg_free, bno);
910 		blk <<= 1;
911 		field = around[allocsiz];
912 		subfield = inside[allocsiz];
913 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
914 			if ((blk & field) == subfield)
915 				return (bno + pos);
916 			field <<= 1;
917 			subfield <<= 1;
918 		}
919 	}
920 	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
921 	panic("alloccg: block not in map");
922 	return (-1);
923 }
924 
925 /*
926  * Fserr prints the name of a file system with an error diagnostic.
927  *
928  * The form of the error message is:
929  *	fs: error message
930  */
931 fserr(fs, cp)
932 	struct fs *fs;
933 	char *cp;
934 {
935 
936 	printf("%s: %s\n", fs->fs_fsmnt, cp);
937 }
938