1 /* 2 * Copyright (c) 1982, 1986 Regents of the University of California. 3 * All rights reserved. The Berkeley software License Agreement 4 * specifies the terms and conditions for redistribution. 5 * 6 * @(#)subr_rmap.c 7.3 (Berkeley) 05/05/89 7 */ 8 9 #include "param.h" 10 #include "systm.h" 11 #include "map.h" 12 #include "user.h" 13 #include "proc.h" 14 #include "text.h" 15 #include "kernel.h" 16 17 /* 18 * Resource map handling routines. 19 * 20 * A resource map is an array of structures each 21 * of which describes a segment of the address space of an available 22 * resource. The segments are described by their base address and 23 * length, and sorted in address order. Each resource map has a fixed 24 * maximum number of segments allowed. Resources are allocated 25 * by taking part or all of one of the segments of the map. 26 * 27 * Returning of resources will require another segment if 28 * the returned resources are not adjacent in the address 29 * space to an existing segment. If the return of a segment 30 * would require a slot which is not available, then one of 31 * the resource map segments is discarded after a warning is printed. 32 * Returning of resources may also cause the map to collapse 33 * by coalescing two existing segments and the returned space 34 * into a single segment. In this case the resource map is 35 * made smaller by copying together to fill the resultant gap. 36 * 37 * N.B.: the current implementation uses a dense array and does 38 * not admit the value ``0'' as a legal address, since that is used 39 * as a delimiter. 40 */ 41 42 /* 43 * Initialize map mp to have (mapsize-2) segments 44 * and to be called ``name'', which we print if 45 * the slots become so fragmented that we lose space. 46 * The map itself is initialized with size elements free 47 * starting at addr. 
 */
rminit(mp, size, addr, name, mapsize)
	register struct map *mp;
	long size, addr;	/* initial free extent: 'size' units at 'addr' */
	char *name;		/* printed if the map overflows */
	int mapsize;		/* total slots, including the map header itself */
{
	register struct mapent *ep = (struct mapent *)(mp+1);

	mp->m_name = name;
/* N.B.: WE ASSUME HERE THAT sizeof (struct map) == sizeof (struct mapent) */
	/*
	 * One of the mapsize slots is taken by the map structure,
	 * segments has size 0 and addr 0, and acts as a delimiter.
	 * We insure that we never use segments past the end of
	 * the array which is given by mp->m_limit.
	 * Instead, when excess segments occur we discard some resources.
	 */
	mp->m_limit = (struct mapent *)&mp[mapsize];
	/*
	 * Simulate a rmfree(), but with the option to
	 * call with size 0 and addr 0 when we just want
	 * to initialize without freeing.
	 */
	ep->m_size = size;
	ep->m_addr = addr;
	/* Zero-size, zero-address entry terminates the segment list. */
	(++ep)->m_size = 0;
	ep->m_addr = 0;
}

/*
 * Allocate 'size' units from the given
 * map. Return the base of the allocated space.
 * In a map, the addresses are increasing and the
 * list is terminated by a 0 size.
 *
 * Algorithm is first-fit.
 *
 * This routine knows about the interleaving of the swapmap
 * and handles that.
 */
long
rmalloc(mp, size)
	register struct map *mp;
	long size;
{
	register struct mapent *ep = (struct mapent *)(mp+1);
	register int addr;
	register struct mapent *bp;
	swblk_t first, rest;

	/* A request larger than one swap interleave unit can never fit. */
	if (size <= 0 || mp == swapmap && size > dmmax)
		panic("rmalloc");
	/*
	 * Search for a piece of the resource map which has enough
	 * free space to accommodate the request.
	 */
	for (bp = ep; bp->m_size; bp++) {
		if (bp->m_size >= size) {
			/*
			 * If allocating from swapmap,
			 * then have to respect interleaving
			 * boundaries: 'first' is the room left in this
			 * piece before the next dmmax boundary.
			 */
			if (mp == swapmap && nswdev > 1 &&
			    (first = dmmax - bp->m_addr%dmmax) < bp->m_size) {
				if (bp->m_size - first < size)
					continue;
				addr = bp->m_addr + first;
				rest = bp->m_size - first - size;
				/* Keep the head fragment; give back the tail. */
				bp->m_size = first;
				if (rest)
					rmfree(swapmap, rest, addr+size);
				return (addr);
			}
			/*
			 * Allocate from the map.
			 * If there is no space left of the piece
			 * we allocated from, move the rest of
			 * the pieces to the left.
			 */
			addr = bp->m_addr;
			bp->m_addr += size;
			if ((bp->m_size -= size) == 0) {
				do {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
				/*
				 * Assignment intended: copy entries down one
				 * slot; the loop ends after the zero-size
				 * delimiter itself has been copied.
				 */
				} while ((bp-1)->m_size = bp->m_size);
			}
			if (mp == swapmap && addr % CLSIZE)
				panic("rmalloc swapmap");
			return (addr);
		}
	}
	/* No piece large enough: 0 is never a valid address (see above). */
	return (0);
}

/*
 * Free the previously allocated space at addr
 * of size units into the specified map.
 * Sort addr into map and combine on
 * one or both ends if possible.
 */
rmfree(mp, size, addr)
	struct map *mp;
	long size, addr;
{
	struct mapent *firstbp;
	register struct mapent *bp;
	register int t;

	/*
	 * Both address and size must be
	 * positive, or the protocol has broken down.
	 */
	if (addr <= 0 || size <= 0)
		goto badrmfree;
	/*
	 * Locate the piece of the map which starts after the
	 * returned space (or the end of the map).
	 */
	firstbp = bp = (struct mapent *)(mp + 1);
	for (; bp->m_addr <= addr && bp->m_size != 0; bp++)
		continue;
	/*
	 * If the piece on the left abuts us,
	 * then we should combine with it.
	 */
	if (bp > firstbp && (bp-1)->m_addr+(bp-1)->m_size >= addr) {
		/*
		 * Check no overlap (internal error).
		 */
		if ((bp-1)->m_addr+(bp-1)->m_size > addr)
			goto badrmfree;
		/*
		 * Add into piece on the left by increasing its size.
		 */
		(bp-1)->m_size += size;
		/*
		 * If the combined piece abuts the piece on
		 * the right now, compress it in also,
		 * by shifting the remaining pieces of the map over.
		 */
		if (bp->m_addr && addr+size >= bp->m_addr) {
			if (addr+size > bp->m_addr)
				goto badrmfree;
			(bp-1)->m_size += bp->m_size;
			while (bp->m_size) {
				bp++;
				(bp-1)->m_addr = bp->m_addr;
				(bp-1)->m_size = bp->m_size;
			}
		}
		goto done;
	}
	/*
	 * Don't abut on the left, check for abutting on
	 * the right.
	 */
	if (addr+size >= bp->m_addr && bp->m_size) {
		if (addr+size > bp->m_addr)
			goto badrmfree;
		bp->m_addr -= size;
		bp->m_size += size;
		goto done;
	}
	/*
	 * Don't abut at all. Make a new entry
	 * and check for map overflow.
	 * Assignment in the while condition is intended: each iteration
	 * swaps (addr,size) with the entry at bp, rippling the old entries
	 * one slot to the right; the loop stops once the zero-size
	 * delimiter has been swapped out.
	 */
	do {
		t = bp->m_addr;
		bp->m_addr = addr;
		addr = t;
		t = bp->m_size;
		bp->m_size = size;
		bp++;
	} while (size = t);
	/*
	 * Segment at bp is to be the delimiter;
	 * If there is not room for it
	 * then the table is too full
	 * and we must discard something.
	 */
	if (bp+1 > mp->m_limit) {
		/*
		 * Back bp up to last available segment.
		 * which contains a segment already and must
		 * be made into the delimiter.
		 * Discard second to last entry,
		 * since it is presumably smaller than the last
		 * and move the last entry back one.
		 */
		bp--;
		/*
		 * NOTE(review): %d assumes m_addr/m_size are int-sized;
		 * verify against the member types in map.h.
		 */
		printf("%s: rmap ovflo, lost [%d,%d)\n", mp->m_name,
		    (bp-1)->m_addr, (bp-1)->m_addr+(bp-1)->m_size);
		bp[-1] = bp[0];
		bp[0].m_size = bp[0].m_addr = 0;
	}
done:
	/*
	 * THIS IS RIDICULOUS... IT DOESN'T BELONG HERE!
	 */
	if ((mp == kernelmap) && kmapwnt) {
		kmapwnt = 0;
		wakeup((caddr_t)kernelmap);
	}
	return;
badrmfree:
	panic("bad rmfree");
}

/*
 * Allocate 'size' units from the given map, starting at address 'addr'.
 * Return 'addr' if successful, 0 if not.
263 * This may cause the creation or destruction of a resource map segment. 264 * 265 * This routine will return failure status if there is not enough room 266 * for a required additional map segment. 267 * 268 * An attempt to use this on 'swapmap' will result in 269 * a failure return. This is due mainly to laziness and could be fixed 270 * to do the right thing, although it probably will never be used. 271 */ 272 rmget(mp, size, addr) 273 register struct map *mp; 274 { 275 register struct mapent *ep = (struct mapent *)(mp+1); 276 register struct mapent *bp, *bp2; 277 278 if (size <= 0) 279 panic("rmget"); 280 if (mp == swapmap) 281 return (0); 282 /* 283 * Look for a map segment containing the requested address. 284 * If none found, return failure. 285 */ 286 for (bp = ep; bp->m_size; bp++) 287 if (bp->m_addr <= addr && bp->m_addr + bp->m_size > addr) 288 break; 289 if (bp->m_size == 0) 290 return (0); 291 292 /* 293 * If segment is too small, return failure. 294 * If big enough, allocate the block, compressing or expanding 295 * the map as necessary. 296 */ 297 if (bp->m_addr + bp->m_size < addr + size) 298 return (0); 299 if (bp->m_addr == addr) 300 if (bp->m_addr + bp->m_size == addr + size) { 301 /* 302 * Allocate entire segment and compress map 303 */ 304 bp2 = bp; 305 while (bp2->m_size) { 306 bp2++; 307 (bp2-1)->m_addr = bp2->m_addr; 308 (bp2-1)->m_size = bp2->m_size; 309 } 310 } else { 311 /* 312 * Allocate first part of segment 313 */ 314 bp->m_addr += size; 315 bp->m_size -= size; 316 } 317 else 318 if (bp->m_addr + bp->m_size == addr + size) { 319 /* 320 * Allocate last part of segment 321 */ 322 bp->m_size -= size; 323 } else { 324 /* 325 * Allocate from middle of segment, but only 326 * if table can be expanded. 
327 */ 328 for (bp2=bp; bp2->m_size; bp2++) 329 ; 330 if (bp2 + 1 >= mp->m_limit) 331 return (0); 332 while (bp2 > bp) { 333 (bp2+1)->m_addr = bp2->m_addr; 334 (bp2+1)->m_size = bp2->m_size; 335 bp2--; 336 } 337 (bp+1)->m_addr = addr + size; 338 (bp+1)->m_size = 339 bp->m_addr + bp->m_size - (addr + size); 340 bp->m_size = addr - bp->m_addr; 341 } 342 return (addr); 343 } 344