/*
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $DragonFly: src/sys/vm/vm_contig.c,v 1.21 2006/12/28 21:24:02 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Contiguous memory allocation API.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static void vm_contig_pg_free(int start, u_long size);

/*
 * vm_contig_pg_clean:
 *
 * Do a thorough walkthrough cleanup of the specified 'queue', which can
 * be either PQ_ACTIVE or PQ_INACTIVE.  If a page is not marked dirty it
 * is shoved into the page cache, provided no one has currently acquired
 * it; otherwise localized action per object type is taken for cleanup:
 *
 *	In the OBJT_VNODE case, the whole page range is cleaned up
 *	using the vm_object_page_clean() routine, by specifying a
 *	start and end of '0'.
 *
 *	Otherwise, if the object is of any other type, the generic
 *	pageout (daemon) flush routine is invoked.
 */
static void
vm_contig_pg_clean(int queue, int count)
{
	vm_object_t object;
	vm_page_t m, m_tmp;
	struct vm_page marker;
	struct vpgqueues *pq = &vm_page_queues[queue];

	/*
	 * Setup a local marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = queue;
	marker.wire_count = 1;

	vm_page_queues_spin_lock(queue);
	TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);

	/*
	 * Iterate the queue.  Note that the vm_page spinlock must be
	 * acquired before the pageq spinlock so it's easiest to simply
	 * not hold it in the loop iteration.
	 */
	while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++count;
			continue;
		}
		KKASSERT(m->queue == queue);

		TAILQ_REMOVE(&pq->pl, &marker, pageq);
		TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);

		/*
		 * We've successfully busied the page
		 */
		if (m->queue - m->pc != queue) {
			vm_page_wakeup(m);
			continue;
		}
		if (m->wire_count || m->hold_count) {
			vm_page_wakeup(m);
			continue;
		}
		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
			vm_object_hold(object);
			KKASSERT(m->object == object);

			if (object->type == OBJT_VNODE) {
				vm_page_wakeup(m);
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
			} else if (object->type == OBJT_SWAP ||
				   object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
			} else {
				vm_page_wakeup(m);
			}
			vm_object_drop(object);
		} else if (m->hold_count == 0) {
			vm_page_cache(m);
		} else {
			vm_page_wakeup(m);
		}
	}

	/*
	 * Scrap our local marker
	 */
	vm_page_queues_spin_lock(queue);
	TAILQ_REMOVE(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);
}

/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map; otherwise it would be
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 */
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		   unsigned long alignment, unsigned long boundary, int mflags)
{
	int i, q, start, pass;
	vm_offset_t phys;
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int pqtype;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_alloc: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_contig_pg_alloc: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_contig_pg_alloc: boundary must be a power of 2");

	/*
	 * See if we can get the pages from the contiguous page reserve
	 * alist.  The returned pages will be allocated and wired but not
	 * busied.
	 */
	m = vm_page_alloc_contig(
		low, high, alignment, boundary, size, VM_MEMATTR_DEFAULT);
	if (m)
		return (m - &pga[0]);

	/*
	 * Three passes (0, 1, 2).  Each pass scans the VM page list for
	 * free or cached pages.  After each pass, if the entire scan failed,
	 * we attempt to flush inactive pages and reset the start index back
	 * to 0.  For passes 1 and 2 we also attempt to flush active pages.
	 */
	start = 0;
	for (pass = 0; pass < 3; pass++) {
		/*
		 * Find first page in array that is free, within range,
		 * aligned, and such that the boundary won't be crossed.
		 */
again:
		for (i = start; i < vmstats.v_page_count; i++) {
			m = &pga[i];
			phys = VM_PAGE_TO_PHYS(m);
			pqtype = m->queue - m->pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
			    m->busy == 0 && m->wire_count == 0 &&
			    m->hold_count == 0 &&
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)) == 0)
			{
				break;
			}
		}

		/*
		 * If we could not find a page in the given range, or the
		 * candidate run would extend past 'high', call
		 * vm_contig_pg_clean() to flush the queues and return
		 * them to a normal state.
		 */
		if ((i == vmstats.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

			/*
			 * Best effort flush of all inactive pages.
			 * This is quite quick; for now we stall all
			 * callers, even if they've specified M_NOWAIT.
			 */
			for (q = 0; q < PQ_L2_SIZE; ++q) {
				vm_contig_pg_clean(PQ_INACTIVE + q,
						   vmstats.v_inactive_count);
				lwkt_yield();
			}

			/*
			 * Best effort flush of active pages.
			 *
			 * This is very, very slow.
			 * Only do this if the caller has agreed to M_WAITOK.
			 *
			 * If enough pages are flushed we may succeed on the
			 * next (final) pass; if not, the caller
			 * (contigmalloc()) will fail via the index < 0 case.
			 */
			if (pass > 0 && (mflags & M_WAITOK)) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					vm_contig_pg_clean(PQ_ACTIVE + q,
							   vmstats.v_active_count);
				}
				lwkt_yield();
			}

			/*
			 * We're already too high in the address space
			 * to succeed; reset to 0 for the next iteration.
			 */
			start = 0;
			continue;	/* next pass */
		}
		start = i;

		/*
		 * Check that successive pages are contiguous and free.
		 *
		 * (still in critical section)
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if ((VM_PAGE_TO_PHYS(&m[0]) !=
			    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
			    m->busy || m->wire_count ||
			    m->hold_count ||
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)))
			{
				start++;
				goto again;
			}
		}

		/*
		 * Try to allocate the pages, wiring them as we go.
		 *
		 * (still in critical section)
		 */
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];

			if (vm_page_busy_try(m, TRUE)) {
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}
			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE &&
			    m->hold_count == 0 &&
			    m->wire_count == 0 &&
			    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0) {
				vm_page_protect(m, VM_PROT_NONE);
				KKASSERT((m->flags & PG_MAPPED) == 0);
				KKASSERT(m->dirty == 0);
				vm_page_free(m);
				--i;
				continue;	/* retry the page */
			}
			if (pqtype != PQ_FREE || m->hold_count) {
				vm_page_wakeup(m);
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}
			KKASSERT((m->valid & m->dirty) == 0);
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->object == NULL);
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			KASSERT(m->dirty == 0,
				("vm_contig_pg_alloc: page %p was dirty", m));
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->busy == 0);

			/*
			 * Clear all flags except PG_BUSY, PG_ZERO, and
			 * PG_WANTED, then unbusy the now allocated page.
			 */
			vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY |
						PG_ZERO | PG_WANTED));
			vm_page_wire(m);
			vm_page_wakeup(m);
		}

		/*
		 * Our job is done; return the index of the first allocated
		 * page in vm_page_array.
		 */
		return (start);	/* aka &pga[start] */
	}

	/*
	 * Failed.
	 */
	return (-1);
}

/*
 * vm_contig_pg_free:
 *
 * Remove pages previously allocated by vm_contig_pg_alloc().  We assume
 * all references to the pages have been removed and that it is OK to add
 * them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.
 */
static void
vm_contig_pg_free(int start, u_long size)
{
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_free: size must not be 0");

	/*
	 * The pages are wired.  vm_page_free_contig() determines whether they
	 * belong to the contig space or not and either frees them to that
	 * space (leaving them wired), or unwires each page and frees it to
	 * the normal PQ_FREE queue.
	 */
	vm_page_free_contig(&pga[start], size);
}

/*
 * vm_contig_pg_kmap:
 *
 * Map a range of pages previously allocated with vm_contig_pg_alloc()
 * from vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel map and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
static vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
	vm_offset_t addr;
	vm_paddr_t pa;
	vm_page_t pga = vm_page_array;
	u_long offset;

	if (size == 0)
		panic("vm_contig_pg_kmap: size must not be 0");
	size = round_page(size);
	addr = kmem_alloc_pageable(&kernel_map, size);
	if (addr) {
		pa = VM_PAGE_TO_PHYS(&pga[start]);
		for (offset = 0; offset < size; offset += PAGE_SIZE)
			pmap_kenter_quick(addr + offset, pa + offset);
		smp_invltlb();
		if (flags & M_ZERO)
			bzero((void *)addr, size);
	}
	return(addr);
}

/*
 * No requirements.
 */
void *
contigmalloc(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	vm_paddr_t low,
	vm_paddr_t high,
	unsigned long alignment,
	unsigned long boundary)
{
	return contigmalloc_map(size, type, flags, low, high, alignment,
				boundary, &kernel_map);
}

/*
 * No requirements.
 */
void *
contigmalloc_map(unsigned long size, struct malloc_type *type,
		 int flags, vm_paddr_t low, vm_paddr_t high,
		 unsigned long alignment, unsigned long boundary,
		 vm_map_t map)
{
	int index;
	void *rv;

	index = vm_contig_pg_alloc(size, low, high, alignment, boundary,
				   flags);
	if (index < 0) {
		kprintf("contigmalloc_map: failed size %lu low=%llx "
			"high=%llx align=%lu boundary=%lu flags=%08x\n",
			size, (long long)low, (long long)high,
			alignment, boundary, flags);
		return NULL;
	}

	rv = (void *)vm_contig_pg_kmap(index, size, map, flags);
	if (rv == NULL)
		vm_contig_pg_free(index, size);

	return rv;
}

/*
 * No requirements.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	vm_paddr_t pa;
	vm_page_t m;

	if (size == 0)
		panic("contigfree: size must not be 0");
	size = round_page(size);

	pa = pmap_extract(&kernel_pmap, (vm_offset_t)addr);
	pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE);
	kmem_free(&kernel_map, (vm_offset_t)addr, size);

	m = PHYS_TO_VM_PAGE(pa);
	vm_page_free_contig(m, size);
}

/*
 * No requirements.
 */
vm_offset_t
kmem_alloc_contig(vm_offset_t size, vm_paddr_t low, vm_paddr_t high,
		  vm_offset_t alignment)
{
	return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
				high, alignment, 0ul, &kernel_map));
}
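
/*
 * Illustrative usage sketch, not part of the interfaces above: a driver
 * that needs a physically contiguous DMA buffer below 4GB might call
 * contigmalloc()/contigfree() roughly as follows.  The 64KB size, the
 * 4GB ceiling and the 'buf' variable are hypothetical values chosen for
 * the example, not anything this file requires.
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK | M_ZERO,
 *			   0ul, 0xFFFFFFFFul, PAGE_SIZE, 0ul);
 *	if (buf != NULL) {
 *		... use the buffer, e.g. hand its physical address
 *		    (pmap_extract() on 'buf') to the device ...
 *		contigfree(buf, 65536, M_DEVBUF);
 *	}
 *
 * Passing 0 for 'boundary' disables the boundary-crossing check in
 * vm_contig_pg_alloc(), 'alignment' must be a power of 2, and M_ZERO is
 * honored by vm_contig_pg_kmap() above.  M_WAITOK additionally permits
 * the slow active-queue flush on later allocation passes.
 */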