xref: /original-bsd/sys/kern/subr_rmap.c.sav (revision 0f30d223)
/*	subr_rmap.c.sav	6.2	84/08/29	*/

#include "param.h"
#include "systm.h"
#include "map.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "text.h"
#include "kernel.h"

/*
 * Resource map handling routines.
 *
 * A resource map is an array of structures each
 * of which describes a segment of the address space of an available
 * resource.  The segments are described by their base address and
 * length, and sorted in address order.  Each resource map has a fixed
 * maximum number of segments allowed.  Resources are allocated
 * by taking part or all of one of the segments of the map.
 *
 * Returning of resources will require another segment if
 * the returned resources are not adjacent in the address
 * space to an existing segment.  If the return of a segment
 * would require a slot which is not available, then one of
 * the resource map segments is discarded after a warning is printed.
 * Returning of resources may also cause the map to collapse
 * by coalescing two existing segments and the returned space
 * into a single segment.  In this case the resource map is
 * made smaller by copying together to fill the resultant gap.
 *
 * N.B.: the current implementation uses a dense array and does
 * not admit the value ``0'' as a legal address, since that is used
 * as a delimiter.
 */

36
37/*
38 * Initialize map mp to have (mapsize-2) segments
39 * and to be called ``name'', which we print if
40 * the slots become so fragmented that we lose space.
41 * The map itself is initialized with size elements free
42 * starting at addr.
43 */
44rminit(mp, size, addr, name, mapsize)
45	register struct map *mp;
46	long size, addr;
47	char *name;
48	int mapsize;
49{
50	register struct mapent *ep = (struct mapent *)(mp+1);
51
52	mp->m_name = name;
53/* N.B.: WE ASSUME HERE THAT sizeof (struct map) == sizeof (struct mapent) */
54	/*
55	 * One of the mapsize slots is taken by the map structure,
56	 * segments has size 0 and addr 0, and acts as a delimiter.
57	 * We insure that we never use segments past the end of
58	 * the array which is given by mp->m_limit.
59	 * Instead, when excess segments occur we discard some resources.
60	 */
61	mp->m_limit = (struct mapent *)&mp[mapsize];
62	/*
63	 * Simulate a rmfree(), but with the option to
64	 * call with size 0 and addr 0 when we just want
65	 * to initialize without freeing.
66	 */
67	ep->m_size = size;
68	ep->m_addr = addr;
69}
70
71/*
72 * Allocate 'size' units from the given
73 * map. Return the base of the allocated space.
74 * In a map, the addresses are increasing and the
75 * list is terminated by a 0 size.
76 *
77 * Algorithm is first-fit.
78 *
79 * This routine knows about the interleaving of the swapmap
80 * and handles that.
81 */
82long
83rmalloc(mp, size)
84	register struct map *mp;
85	long size;
86{
87	register struct mapent *ep = (struct mapent *)(mp+1);
88	register int addr;
89	register struct mapent *bp;
90	swblk_t first, rest;
91
92	if (size <= 0 || mp == swapmap && size > dmmax)
93		panic("rmalloc");
94	/*
95	 * Search for a piece of the resource map which has enough
96	 * free space to accomodate the request.
97	 */
98	for (bp = ep; bp->m_size; bp++) {
99		if (bp->m_size >= size) {
100			/*
101			 * If allocating from swapmap,
102			 * then have to respect interleaving
103			 * boundaries.
104			 */
105			if (mp == swapmap && nswdev > 1 &&
106			    (first = dmmax - bp->m_addr%dmmax) < bp->m_size) {
107				if (bp->m_size - first < size)
108					continue;
109				addr = bp->m_addr + first;
110				rest = bp->m_size - first - size;
111				bp->m_size = first;
112				if (rest)
113					rmfree(swapmap, rest, addr+size);
114				return (addr);
115			}
116			/*
117			 * Allocate from the map.
118			 * If there is no space left of the piece
119			 * we allocated from, move the rest of
120			 * the pieces to the left.
121			 */
122			addr = bp->m_addr;
123			bp->m_addr += size;
124			if ((bp->m_size -= size) == 0) {
125				do {
126					bp++;
127					(bp-1)->m_addr = bp->m_addr;
128				} while ((bp-1)->m_size = bp->m_size);
129			}
130			if (mp == swapmap && addr % CLSIZE)
131				panic("rmalloc swapmap");
132			return (addr);
133		}
134	}
135	return (0);
136}
137
138/*
139 * Free the previously allocated space at addr
140 * of size units into the specified map.
141 * Sort addr into map and combine on
142 * one or both ends if possible.
143 */
144rmfree(mp, size, addr)
145	struct map *mp;
146	long size, addr;
147{
148	struct mapent *firstbp;
149	register struct mapent *bp;
150	register int t;
151
152	/*
153	 * Both address and size must be
154	 * positive, or the protocol has broken down.
155	 */
156	if (addr <= 0 || size <= 0)
157		goto badrmfree;
158	/*
159	 * Locate the piece of the map which starts after the
160	 * returned space (or the end of the map).
161	 */
162	firstbp = bp = (struct mapent *)(mp + 1);
163	for (; bp->m_addr <= addr && bp->m_size != 0; bp++)
164		continue;
165	/*
166	 * If the piece on the left abuts us,
167	 * then we should combine with it.
168	 */
169	if (bp > firstbp && (bp-1)->m_addr+(bp-1)->m_size >= addr) {
170		/*
171		 * Check no overlap (internal error).
172		 */
173		if ((bp-1)->m_addr+(bp-1)->m_size > addr)
174			goto badrmfree;
175		/*
176		 * Add into piece on the left by increasing its size.
177		 */
178		(bp-1)->m_size += size;
179		/*
180		 * If the combined piece abuts the piece on
181		 * the right now, compress it in also,
182		 * by shifting the remaining pieces of the map over.
183		 */
184		if (bp->m_addr && addr+size >= bp->m_addr) {
185			if (addr+size > bp->m_addr)
186				goto badrmfree;
187			(bp-1)->m_size += bp->m_size;
188			while (bp->m_size) {
189				bp++;
190				(bp-1)->m_addr = bp->m_addr;
191				(bp-1)->m_size = bp->m_size;
192			}
193		}
194		goto done;
195	}
196	/*
197	 * Don't abut on the left, check for abutting on
198	 * the right.
199	 */
200	if (addr+size >= bp->m_addr && bp->m_size) {
201		if (addr+size > bp->m_addr)
202			goto badrmfree;
203		bp->m_addr -= size;
204		bp->m_size += size;
205		goto done;
206	}
207	/*
208	 * Don't abut at all.  Make a new entry
209	 * and check for map overflow.
210	 */
211	do {
212		t = bp->m_addr;
213		bp->m_addr = addr;
214		addr = t;
215		t = bp->m_size;
216		bp->m_size = size;
217		bp++;
218	} while (size = t);
219	/*
220	 * Segment at bp is to be the delimiter;
221	 * If there is not room for it
222	 * then the table is too full
223	 * and we must discard something.
224	 */
225	if (bp+1 > mp->m_limit) {
226		/*
227		 * Back bp up to last available segment.
228		 * which contains a segment already and must
229		 * be made into the delimiter.
230		 * Discard second to last entry,
231		 * since it is presumably smaller than the last
232		 * and move the last entry back one.
233		 */
234		bp--;
235		printf("%s: rmap ovflo, lost [%d,%d)\n", mp->m_name,
236		    (bp-1)->m_addr, (bp-1)->m_addr+(bp-1)->m_size);
237		bp[-1] = bp[0];
238		bp[0].m_size = bp[0].m_addr = 0;
239	}
240done:
241	/*
242	 * THIS IS RIDICULOUS... IT DOESN'T BELONG HERE!
243	 */
244	if ((mp == kernelmap) && kmapwnt) {
245		kmapwnt = 0;
246		wakeup((caddr_t)kernelmap);
247	}
248	return;
249badrmfree:
250	panic("bad rmfree");
251}
252
253/*
254 * Allocate 'size' units from the given map, starting at address 'addr'.
255 * Return 'addr' if successful, 0 if not.
256 * This may cause the creation or destruction of a resource map segment.
257 *
258 * This routine will return failure status if there is not enough room
259 * for a required additional map segment.
260 *
261 * An attempt to use this on 'swapmap' will result in
262 * a failure return.  This is due mainly to laziness and could be fixed
263 * to do the right thing, although it probably will never be used.
264 */
265rmget(mp, size, addr)
266	register struct map *mp;
267{
268	register struct mapent *ep = (struct mapent *)(mp+1);
269	register struct mapent *bp, *bp2;
270
271	if (size <= 0)
272		panic("rmget");
273	if (mp == swapmap)
274		return (0);
275	/*
276	 * Look for a map segment containing the requested address.
277	 * If none found, return failure.
278	 */
279	for (bp = ep; bp->m_size; bp++)
280		if (bp->m_addr <= addr && bp->m_addr + bp->m_size > addr)
281			break;
282	if (bp->m_size == 0)
283		return (0);
284
285	/*
286	 * If segment is too small, return failure.
287	 * If big enough, allocate the block, compressing or expanding
288	 * the map as necessary.
289	 */
290	if (bp->m_addr + bp->m_size < addr + size)
291		return (0);
292	if (bp->m_addr == addr)
293		if (bp->m_addr + bp->m_size == addr + size) {
294			/*
295			 * Allocate entire segment and compress map
296			 */
297			bp2 = bp;
298			while (bp2->m_size) {
299				bp2++;
300				(bp2-1)->m_addr = bp2->m_addr;
301				(bp2-1)->m_size = bp2->m_size;
302			}
303		} else {
304			/*
305			 * Allocate first part of segment
306			 */
307			bp->m_addr += size;
308			bp->m_size -= size;
309		}
310	else
311		if (bp->m_addr + bp->m_size == addr + size) {
312			/*
313			 * Allocate last part of segment
314			 */
315			bp->m_size -= size;
316		} else {
317			/*
318			 * Allocate from middle of segment, but only
319			 * if table can be expanded.
320			 */
321			for (bp2=bp; bp2->m_size; bp2++)
322				;
323			if (bp2 == mp->m_limit)
324				return (0);
325			while (bp2 > bp) {
326				(bp2+1)->m_addr = bp2->m_addr;
327				(bp2+1)->m_size = bp2->m_size;
328				bp2--;
329			}
330			(bp+1)->m_addr = addr + size;
331			(bp+1)->m_size =
332			    bp->m_addr + bp->m_size - (addr + size);
333			bp->m_size = addr - bp->m_addr;
334		}
335	return (addr);
336}
337