xref: /original-bsd/sys/ufs/ffs/ffs_alloc.c (revision 792e4f5f)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)ffs_alloc.c	7.9 (Berkeley) 05/09/89
 */

#include "param.h"
#include "systm.h"
#include "mount.h"
#include "buf.h"
#include "user.h"
#include "vnode.h"
#include "kernel.h"
#include "syslog.h"
#include "cmap.h"
#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/fs.h"

extern u_long		hashalloc();
extern ino_t		ialloccg();
extern daddr_t		alloccg();
extern daddr_t		alloccgblk();
extern daddr_t		fragextend();
extern daddr_t		blkpref();
extern daddr_t		mapsearch();
extern int		inside[], around[];
extern unsigned char	*fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
alloc(ip, bpref, size, bpp, flags)
	register struct inode *ip;
	daddr_t bpref;
	int size;
	struct buf **bpp;
	int flags;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
		(u_long (*)())alloccg);
	if (bno <= 0)
		goto nospace;
	ip->i_blocks += btodb(size);
	ip->i_flag |= IUPD|ICHG;
	bp = getblk(ip->i_devvp, fsbtodb(fs, bno), size);
	if (flags & B_CLRBUF)
		clrbuf(bp);
	*bpp = bp;
	return (0);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
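
/*
 * Illustrative sketch, not part of the original allocator: the
 * starting cylinder group that alloc() hands to hashalloc(), reduced
 * to plain arithmetic.  itog() and dtog() divide by the inodes and
 * fragments per cylinder group; ipg and fpg below are hypothetical
 * stand-ins for fs_ipg and fs_fpg.
 */
#ifdef notdef
static int
startcg_model(inumber, bpref, ipg, fpg)
	long inumber, bpref;
	int ipg, fpg;
{

	if (bpref == 0)
		return (inumber / ipg);	/* itog(): group holding the inode */
	return (bpref / fpg);		/* dtog(): group holding the preference */
}
#endif /* notdef */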

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
realloccg(ip, bprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request;
	daddr_t bno, bn;
	int i, error, count;

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if (bprev == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), 0))
		return (error);
#endif
	cg = dtog(fs, bprev);
	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		do {
			error = bread(ip->i_devvp, fsbtodb(fs, bno),
				osize, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		} while (brealloc(bp, nsize) == 0);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
		(u_long (*)())alloccg);
	if (bno > 0) {
		error = bread(ip->i_devvp, fsbtodb(fs, bprev), osize, &obp);
		if (error) {
			brelse(obp);
			return (error);
		}
		bn = fsbtodb(fs, bno);
		bp = getblk(ip->i_devvp, bn, nsize);
		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
		count = howmany(osize, CLBYTES);
		for (i = 0; i < count; i++)
			munhash(ip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		if (obp->b_flags & B_DELWRI) {
			obp->b_flags &= ~B_DELWRI;
			u.u_ru.ru_oublock--;		/* delete charge */
		}
		brelse(obp);
		blkfree(ip, bprev, (off_t)osize);
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
				(off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
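
/*
 * Illustrative sketch, not part of the original source: the two
 * hysteresis thresholds used in the optimization switch above, for
 * hypothetical figures fs_dsize = 200000 fragments and fs_minfree =
 * 10%.  SPACE flips to TIME once fragmented free space falls to
 * 200000 * 10 / 200 = 10000 fragments or less; TIME flips back to
 * SPACE once it climbs to 200000 * (10 - 2) / 100 = 16000 or more.
 */
#ifdef notdef
static void
optim_thresholds(dsize, minfree, to_time, to_space)
	long dsize, *to_time, *to_space;
	int minfree;
{

	/* SPACE -> TIME: free fragments at or below half the reserve */
	*to_time = dsize * minfree / (2 * 100);
	/* TIME -> SPACE: free fragments within 2% of the reserve */
	*to_space = dsize * (minfree - 2) / 100;
}
#endif /* notdef */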

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 *   1) allocate the requested inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
ialloc(pip, ipref, mode, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
#ifdef QUOTA
	if (error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
		return (error);
#endif
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	ip = *ipp;
	if (error) {
		ifree(pip, ino, 0);
		return (error);
	}
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	return (0);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
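
/*
 * Illustrative sketch of a caller, under hedged assumptions: creates
 * conventionally seed ipref from dirpref() for new directories, so
 * directories spread across cylinder groups, and from the parent
 * directory's own inode number for plain files, so files cluster
 * near their directory.  The helper below is hypothetical, not a
 * routine in this file.
 */
#ifdef notdef
static int
ialloc_demo(pip, mode, ipp)
	register struct inode *pip;
	int mode;
	struct inode **ipp;
{
	ino_t ipref;

	if ((mode & IFMT) == IFDIR)
		ipref = dirpref(pip->i_fs);	/* spread directories out */
	else
		ipref = pip->i_number;		/* keep files near parent */
	return (ialloc(pip, ipref, mode, ipp));
}
#endif /* notdef */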

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to select, from among
 * those cylinder groups with an above-average number of free inodes,
 * the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
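
/*
 * Illustrative sketch, not from the original source: the same policy
 * over plain arrays, with hypothetical per-group counts.  Groups whose
 * free-inode count is below the mean are skipped; ties go to the
 * lowest-numbered group, just as the loop above leaves mincg alone
 * unless it finds a strictly smaller directory count.
 */
#ifdef notdef
static int
dirpref_model(ndir, nifree, ncg, ipg)
	int *ndir, *nifree, ncg, ipg;
{
	int cg, mincg, minndir, avgifree, total;

	total = 0;
	for (cg = 0; cg < ncg; cg++)
		total += nifree[cg];
	avgifree = total / ncg;
	mincg = 0;
	minndir = ipg;			/* no group can hold more */
	for (cg = 0; cg < ncg; cg++)
		if (nifree[cg] >= avgifree && ndir[cg] < minndir) {
			mincg = cg;
			minndir = ndir[cg];
		}
	return (mincg);
}
#endif /* notdef */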

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
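
/*
 * Illustrative worked example, not from the original source: the
 * ms-to-fragments conversion above for hypothetical drive parameters
 * fs_rotdelay = 4ms, fs_rps = 60, fs_nsect = 32 sectors per track,
 * NSPF(fs) = 2 sectors per fragment, and fs_frag = 8.  Then
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), which
 * roundup() carries to the 8-fragment block boundary, so consecutive
 * clusters end up separated by 8 fragments.
 */
#ifdef notdef
static long
rotdelay_frags(rotdelay, rps, nsect, nspf, frag)
	int rotdelay, rps, nsect, nspf, frag;
{
	long frags;

	frags = (long)rotdelay * rps * nsect / (nspf * 1000);
	return ((frags + frag - 1) / frag * frag);	/* roundup() */
}
#endif /* notdef */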

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
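
/*
 * Illustrative sketch, not part of the original source: the order in
 * which the three phases above visit cylinder groups.  The doubling
 * increment lands on cumulative offsets 1, 3, 7, 15, ... from the
 * starting group, and the brute force pass then sweeps everything
 * from icg + 2 onward, so a few groups may be probed twice.  For a
 * hypothetical icg = 5 and ncg = 16 this prints "5 6 8 12 4" for the
 * first two phases, then "7 8 ... 4" for the last.
 */
#ifdef notdef
static void
hash_order_demo(icg, ncg)
	int icg, ncg;
{
	int i, cg;

	cg = icg;
	printf("%d", cg);			/* 1: preferred group */
	for (i = 1; i < ncg; i *= 2) {		/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf(" %d", cg);
	}
	cg = (icg + 2) % ncg;			/* 3: brute force */
	for (i = 2; i < ncg; i++) {
		printf(" %d", cg);
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
}
#endif /* notdef */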

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}
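
/*
 * Illustrative sketch of the block-boundary test above, with
 * hypothetical numbers: for fs_frag = 8, fragnum() reduces to
 * "mod 8".  Extending bprev = 13 (offset 5 within its block) to a
 * new size of 4 fragments would end at fragment 16, whose offset 0
 * is below 5, so the extension would cross into the next block and
 * is refused.
 */
#ifdef notdef
static int
can_extend(bprev, frags, frag)
	long bprev;
	int frags, frag;
{

	/* fragnum(fs, x) is x modulo fs_frag */
	return (bprev % frag <= (bprev + frags - 1) % frag);
}
#endif /* notdef */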

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
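
/*
 * Illustrative sketch, not from the original source: the cg_frsum
 * bookkeeping above when a run of free fragments is carved up, with
 * hypothetical numbers for fs_frag = 8.  Taking 3 fragments out of a
 * 5-fragment run removes one run of 5 and leaves one run of 2, so
 * frsum[5] drops and frsum[2] rises.  (The split-a-whole-block path
 * differs slightly: whole blocks are not counted in frsum, so only
 * frsum[8 - frags] is incremented.)
 */
#ifdef notdef
static void
frsum_update(frsum, runlen, frags)
	long *frsum;
	int runlen, frags;
{

	frsum[runlen]--;			/* the run that was found */
	if (frags != runlen)
		frsum[runlen - frags]++;	/* the leftover piece */
}
#endif /* notdef */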

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block; it is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ialloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ialloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
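
/*
 * Illustrative sketch, not part of the original source: the shape of
 * the bitmap scan above in plain C.  skpc() skips bytes that are all
 * ones (0xff, every inode in the byte allocated); the bit loop then
 * walks the first byte that still has a zero bit.  "start" and "len"
 * are hypothetical byte offsets into the map.
 */
#ifdef notdef
static int
first_clear_bit(map, start, len)
	u_char *map;
	int start, len;
{
	int i, bit;

	for (i = start; i < start + len; i++)
		if (map[i] != 0xff)
			for (bit = 0; bit < NBBY; bit++)
				if ((map[i] & (1 << bit)) == 0)
					return (i * NBBY + bit);
	return (-1);		/* corrupted map: the caller panics */
}
#endif /* notdef */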

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}
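
/*
 * Illustrative sketch of the reassembly accounting above, with
 * hypothetical numbers for fs_frag = 8: freeing the last 3 fragments
 * of a block whose other 5 fragments are already free leaves 8
 * contiguous free fragments, so 8 is subtracted from the fragment
 * counts and one whole block is added to the block counts.
 */
#ifdef notdef
static void
reassemble_model(nffree, nbfree, frag)
	long *nffree, *nbfree;
	int frag;
{

	*nffree -= frag;	/* the fragments stop being "fragments" */
	(*nbfree)++;		/* and become one whole free block */
}
#endif /* notdef */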

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg * fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}
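
/*
 * Illustrative sketch, not from the original source: a naive
 * bit-at-a-time stand-in for the table-driven inner loop above.  It
 * scans one fs_frag-wide, block-aligned window of the free map (where
 * a set bit means the fragment is free) for a run of allocsiz free
 * fragments.  The real loop answers the same question with the
 * shifted around[]/inside[] masks, so that each candidate position
 * costs one AND and one compare.
 */
#ifdef notdef
static int
naive_run_search(map, bno, frag, allocsiz)
	u_char *map;
	long bno;
	int frag, allocsiz;
{
	int pos, run;

	run = 0;
	for (pos = 0; pos < frag; pos++) {
		if (isset(map, bno + pos))
			run++;
		else
			run = 0;
		if (run == allocsiz)
			return ((int)bno + pos - allocsiz + 1);
	}
	return (-1);
}
#endif /* notdef */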

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
}