/*
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Contiguous memory allocation API.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

#include <machine/bus_dma.h>

static void vm_contig_pg_free(vm_pindex_t start, u_long size);

/*
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by doing a walkthrough.  If the page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise localized action per object type
 * is taken for cleanup:
 *
 *	In the OBJT_VNODE case, the whole page range is cleaned up
 *	using the vm_object_page_clean() routine, by specifying a
 *	start and end of '0'.
 *
 *	Otherwise, if the object is of any other type, the generic
 *	pageout (daemon) flush routine is invoked.
 */
static void
vm_contig_pg_clean(int queue, vm_pindex_t count)
{
        vm_object_t object;
        vm_page_t m, m_tmp;
        struct vm_page marker;
        struct vpgqueues *pq = &vm_page_queues[queue];

        /*
         * Setup a local marker
         */
        bzero(&marker, sizeof(marker));
        marker.flags = PG_FICTITIOUS | PG_MARKER;
        marker.busy_count = PBUSY_LOCKED;
        marker.queue = queue;
        marker.wire_count = 1;

        vm_page_queues_spin_lock(queue);
        TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
        vm_page_queues_spin_unlock(queue);

        /*
         * Iterate the queue.  Note that the vm_page spinlock must be
         * acquired before the pageq spinlock, so it's easiest to simply
         * not hold it in the loop iteration.
         */
        while ((long)count-- > 0 &&
               (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
                vm_page_and_queue_spin_lock(m);
                if (m != TAILQ_NEXT(&marker, pageq)) {
                        vm_page_and_queue_spin_unlock(m);
                        ++count;
                        continue;
                }
                KKASSERT(m->queue == queue);

                TAILQ_REMOVE(&pq->pl, &marker, pageq);
                TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

                if (m->flags & PG_MARKER) {
                        vm_page_and_queue_spin_unlock(m);
                        continue;
                }
                if (vm_page_busy_try(m, TRUE)) {
                        vm_page_and_queue_spin_unlock(m);
                        continue;
                }
                vm_page_and_queue_spin_unlock(m);

                /*
                 * We've successfully busied the page
                 */
                if (m->queue - m->pc != queue) {
                        vm_page_wakeup(m);
                        continue;
                }
                if (m->wire_count || m->hold_count) {
                        vm_page_wakeup(m);
                        continue;
                }
                if ((object = m->object) == NULL) {
                        vm_page_wakeup(m);
                        continue;
                }
                vm_page_test_dirty(m);
                if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
                        vm_object_hold(object);
                        KKASSERT(m->object == object);

                        if (object->type == OBJT_VNODE) {
                                vm_page_wakeup(m);
                                vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
                                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                                vn_unlock(((struct vnode *)object->handle));
                        } else if (object->type == OBJT_SWAP ||
                                   object->type == OBJT_DEFAULT) {
                                m_tmp = m;
                                vm_pageout_flush(&m_tmp, 1, 0);
                        } else {
                                vm_page_wakeup(m);
                        }
                        vm_object_drop(object);
                } else if (m->hold_count == 0) {
                        vm_page_cache(m);
                } else {
                        vm_page_wakeup(m);
                }
        }

        /*
         * Scrap our local marker
         */
        vm_page_queues_spin_lock(queue);
        TAILQ_REMOVE(&pq->pl, &marker, pageq);
        vm_page_queues_spin_unlock(queue);
}
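
/*
 * A condensed restatement of the marker-based scan used above (an
 * illustrative sketch, not additional API): because the queue spinlock
 * is dropped while each candidate page is busied and cleaned, the scan
 * cannot safely resume from the page itself.  The marker holds the
 * position instead:
 *
 *	TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
 *	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
 *		(lock, then re-check that m still follows the marker)
 *		TAILQ_REMOVE(&pq->pl, &marker, pageq);
 *		TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);
 *		(drop the queue lock and operate on m)
 *	}
 *	TAILQ_REMOVE(&pq->pl, &marker, pageq);
 *
 * Markers belonging to other concurrent scans are skipped via the
 * PG_MARKER flag test.
 */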

/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it would be
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 */
static vm_pindex_t
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
                   unsigned long alignment, unsigned long boundary, int mflags)
{
        vm_pindex_t i, q, start;
        vm_offset_t phys;
        vm_page_t pga = vm_page_array;
        vm_page_t m;
        int pass;
        int pqtype;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_alloc: size must not be 0");
        if ((alignment & (alignment - 1)) != 0)
                panic("vm_contig_pg_alloc: alignment must be a power of 2");
        if ((boundary & (boundary - 1)) != 0)
                panic("vm_contig_pg_alloc: boundary must be a power of 2");

        /*
         * See if we can get the pages from the contiguous page reserve
         * alist.  The returned pages will be allocated and wired but not
         * busied.
         *
         * If high is not set to BUS_SPACE_MAXADDR we try using our
         * free memory reserve first, otherwise we try it last.
         *
         * XXX Always use the dma reserve first for performance, until
         * we find a better way to differentiate the DRM API.
         */
#if 0
        if (high != BUS_SPACE_MAXADDR)
#endif
        {
                m = vm_page_alloc_contig(
                        low, high, alignment, boundary,
                        size, VM_MEMATTR_DEFAULT);
                if (m)
                        return (m - &pga[0]);
        }

        /*
         * Three passes (0, 1, 2).  Each pass scans the VM page list for
         * free or cached pages.  After each pass, if the entire scan failed,
         * we attempt to flush inactive pages and reset the start index back
         * to 0.  For passes 1 and 2 we also attempt to flush active pages.
         */
        start = 0;
        for (pass = 0; pass < 3; pass++) {
                /*
                 * Find first page in array that is free, within range,
                 * aligned, and such that the boundary won't be crossed.
                 */
again:
                for (i = start; i < vmstats.v_page_count; i++) {
                        m = &pga[i];
                        phys = VM_PAGE_TO_PHYS(m);
                        pqtype = m->queue - m->pc;
                        if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
                            (phys >= low) && (phys < high) &&
                            ((phys & (alignment - 1)) == 0) &&
                            (rounddown2(phys ^ (phys + size - 1), boundary) == 0) &&
                            m->wire_count == 0 && m->hold_count == 0 &&
                            (m->busy_count &
                             (PBUSY_LOCKED | PBUSY_MASK)) == 0 &&
                            (m->flags & PG_NEED_COMMIT) == 0)
                        {
                                break;
                        }
                }
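
                /*
                 * A worked example of the boundary test above (illustrative
                 * numbers, assuming rounddown2(x, y) == (x & ~((y) - 1))):
                 * the XOR of the first and last byte addresses has bits set
                 * exactly where the two addresses differ, so any surviving
                 * bit at or above log2(boundary) means the run straddles a
                 * boundary-aligned window.
                 *
                 * With boundary = 0x10000 (64KB), phys = 0xFFF000 and
                 * size = 0x2000, the last byte is at 0x1000FFF:
                 *
                 *	0x00FFF000 ^ 0x01000FFF = 0x01FFFFFF
                 *	rounddown2(0x01FFFFFF, 0x10000) = 0x01FF0000 != 0
                 *
                 * so that run crosses a 64KB boundary and is rejected,
                 * while phys = 0x1000000 with the same size yields
                 * rounddown2(0x1FFF, 0x10000) == 0 and is accepted.  A
                 * boundary of 0 always passes, since rounddown2(x, 0) == 0.
                 */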

                /*
                 * If we cannot find the page in the given range, or we have
                 * crossed the boundary, call the vm_contig_pg_clean()
                 * function to flush out the queues and return pages to a
                 * normal state.
                 */
                if ((i == vmstats.v_page_count) ||
                    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

                        /*
                         * Best effort flush of all inactive pages.
                         * This is quite quick, for now stall all
                         * callers, even if they've specified M_NOWAIT.
                         */
                        for (q = 0; q < PQ_L2_SIZE; ++q) {
                                vm_contig_pg_clean(PQ_INACTIVE + q,
                                                   vmstats.v_inactive_count);
                                lwkt_yield();
                        }

                        /*
                         * Best effort flush of active pages.
                         *
                         * This is very, very slow.
                         * Only do this if the caller has agreed to M_WAITOK.
                         *
                         * If enough pages are flushed, we may succeed on
                         * the next (final) pass; if not, the caller,
                         * contigmalloc(), will fail in the index < 0 case.
                         */
                        if (pass > 0 && (mflags & M_WAITOK)) {
                                for (q = 0; q < PQ_L2_SIZE; ++q) {
                                        vm_contig_pg_clean(PQ_ACTIVE + q,
                                                           vmstats.v_active_count);
                                }
                                lwkt_yield();
                        }

                        /*
                         * We're already too high in the address space
                         * to succeed, reset to 0 for the next iteration.
                         */
                        start = 0;
                        continue;	/* next pass */
                }
                start = i;

                /*
                 * Check successive pages for contiguous and free.
                 *
                 * (still in critical section)
                 */
                for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
                        m = &pga[i];
                        pqtype = m->queue - m->pc;
                        if ((VM_PAGE_TO_PHYS(&m[0]) !=
                             (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
                            ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
                            m->wire_count ||
                            m->hold_count ||
                            (m->busy_count & (PBUSY_LOCKED | PBUSY_MASK)) ||
                            (m->flags & PG_NEED_COMMIT))
                        {
                                start++;
                                goto again;
                        }
                }
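
                /*
                 * Note on the physical adjacency test above: vm_page_array
                 * index order generally follows physical address order, but
                 * physical memory may contain holes, so adjacent array
                 * entries are not guaranteed to be physically adjacent.
                 * Hence the explicit VM_PAGE_TO_PHYS() comparison rather
                 * than relying on the indices alone.
                 */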

                /*
                 * Try to allocate the pages, wiring them as we go.
                 *
                 * (still in critical section)
                 */
                for (i = start; i < (start + size / PAGE_SIZE); i++) {
                        m = &pga[i];

                        if (vm_page_busy_try(m, TRUE)) {
                                vm_contig_pg_free(start,
                                                  (i - start) * PAGE_SIZE);
                                start++;
                                goto again;
                        }
                        pqtype = m->queue - m->pc;
                        if (pqtype == PQ_CACHE &&
                            m->hold_count == 0 &&
                            m->wire_count == 0 &&
                            (m->flags & PG_NEED_COMMIT) == 0) {
                                vm_page_protect(m, VM_PROT_NONE);
                                KKASSERT((m->flags &
                                          (PG_MAPPED | PG_UNQUEUED)) == 0);
                                KKASSERT(m->dirty == 0);
                                vm_page_free(m);
                                --i;
                                continue;	/* retry the page */
                        }
                        if (pqtype != PQ_FREE || m->hold_count) {
                                vm_page_wakeup(m);
                                vm_contig_pg_free(start,
                                                  (i - start) * PAGE_SIZE);
                                start++;
                                goto again;
                        }
                        KKASSERT((m->valid & m->dirty) == 0);
                        KKASSERT(m->wire_count == 0);
                        KKASSERT(m->object == NULL);
                        vm_page_unqueue_nowakeup(m);
                        m->valid = VM_PAGE_BITS_ALL;
                        KASSERT(m->dirty == 0,
                                ("vm_contig_pg_alloc: page %p was dirty", m));
                        KKASSERT(m->wire_count == 0);
                        KKASSERT((m->busy_count & PBUSY_MASK) == 0);

                        /*
                         * Clear all flags, set FICTITIOUS and UNQUEUED to
                         * indicate that the pages are special, then unbusy
                         * the now allocated page.
                         *
                         * XXX setting FICTITIOUS and UNQUEUED in the future.
                         * (also pair up with vm_contig_pg_free)
                         */
                        vm_page_flag_clear(m, ~PG_KEEP_NEWPAGE_MASK);
                        /* vm_page_flag_set(m, PG_FICTITIOUS | PG_UNQUEUED);*/
                        vm_page_wire(m);
                        vm_page_wakeup(m);
                }

                /*
                 * Our job is done, return the starting index into
                 * vm_page_array.
                 */
                return (start);		/* aka &pga[start] */
        }

#if 0
        /*
         * Failed, if we haven't already tried, allocate from our reserved
         * dma memory.
         *
         * XXX (see conditionalized code above)
         */
        if (high == BUS_SPACE_MAXADDR) {
                m = vm_page_alloc_contig(
                        low, high, alignment, boundary,
                        size, VM_MEMATTR_DEFAULT);
                if (m)
                        return (m - &pga[0]);
        }
#endif

        /*
         * Failed.
         */
        return ((vm_pindex_t)-1);
}
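
/*
 * How the helpers in this file fit together (a condensed restatement of
 * contigmalloc_map() below, for orientation only):
 *
 *	index = vm_contig_pg_alloc(size, low, high, align, boundary, flags);
 *	if (index == (vm_pindex_t)-1)
 *		return (NULL);			(allocation failed)
 *	addr = vm_contig_pg_kmap(index, size, map, flags);
 *	if (addr == 0)
 *		vm_contig_pg_free(index, size);	(undo on KVA failure)
 *
 * The pages come back wired; freeing goes through contigfree(), which
 * unmaps the KVA and hands the pages to vm_page_free_contig().
 */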

/*
 * vm_contig_pg_free:
 *
 * Remove pages previously allocated by vm_contig_pg_alloc, and
 * assume all references to the pages have been removed, and that
 * it is OK to add them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.
 */
static void
vm_contig_pg_free(vm_pindex_t start, u_long size)
{
        vm_page_t pga = vm_page_array;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_free: size must not be 0");

        /*
         * The pages are wired, vm_page_free_contig() determines whether they
         * belong to the contig space or not and either frees them to that
         * space (leaving them wired), or unwires the page and frees it to the
         * normal PQ_FREE queue.
         */
        vm_page_free_contig(&pga[start], size);
}

/*
 * vm_contig_pg_kmap:
 *
 * Map a previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel, and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
static vm_offset_t
vm_contig_pg_kmap(vm_pindex_t start, u_long size, vm_map_t map, int flags)
{
        vm_offset_t addr;
        vm_paddr_t pa;
        vm_page_t pga = vm_page_array;
        u_long offset;

        if (size == 0)
                panic("vm_contig_pg_kmap: size must not be 0");
        size = round_page(size);
        addr = kmem_alloc_pageable(&kernel_map, size, VM_SUBSYS_CONTIG);
        if (addr) {
                pa = VM_PAGE_TO_PHYS(&pga[start]);
                for (offset = 0; offset < size; offset += PAGE_SIZE)
                        pmap_kenter_noinval(addr + offset, pa + offset);
                pmap_invalidate_range(&kernel_pmap, addr, addr + size);
                if (flags & M_ZERO)
                        bzero((void *)addr, size);
        }
        return (addr);
}

/*
 * No requirements.
 */
void *
contigmalloc(
        unsigned long size,	/* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary)
{
        return contigmalloc_map(size, type, flags, low, high, alignment,
                                boundary, &kernel_map);
}

/*
 * No requirements.
 */
void *
contigmalloc_map(unsigned long size, struct malloc_type *type,
                 int flags, vm_paddr_t low, vm_paddr_t high,
                 unsigned long alignment, unsigned long boundary,
                 vm_map_t map)
{
        vm_pindex_t index;
        void *rv;

        index = vm_contig_pg_alloc(size, low, high, alignment, boundary,
                                   flags);
        if (index == (vm_pindex_t)-1) {
                kprintf("contigmalloc_map: failed size %lu low=%llx "
                        "high=%llx align=%lu boundary=%lu flags=%08x\n",
                        size, (long long)low, (long long)high,
                        alignment, boundary, flags);
                return NULL;
        }

        rv = (void *)vm_contig_pg_kmap(index, size, map, flags);
        if (rv == NULL)
                vm_contig_pg_free(index, size);

        return rv;
}

/*
 * No requirements.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
        vm_paddr_t pa;
        vm_page_t m;

        if (size == 0)
                panic("contigfree: size must not be 0");
        size = round_page(size);

        pa = pmap_kextract((vm_offset_t)addr);
        pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE);
        kmem_free(&kernel_map, (vm_offset_t)addr, size);

        m = PHYS_TO_VM_PAGE(pa);
        vm_page_free_contig(m, size);
}

/*
 * No requirements.
 */
vm_offset_t
kmem_alloc_contig(vm_offset_t size, vm_paddr_t low, vm_paddr_t high,
                  vm_offset_t alignment)
{
        return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
                                              high, alignment, 0ul,
                                              &kernel_map));
}
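
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * driver allocating a 64KB physically contiguous, zeroed, page-aligned
 * DMA buffer below 4GB, then releasing it:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK | M_ZERO,
 *			   0,			(low)
 *			   0xFFFFFFFFul,	(high)
 *			   PAGE_SIZE,		(alignment)
 *			   0);			(boundary, 0 = none)
 *	if (buf != NULL) {
 *		...
 *		contigfree(buf, 65536, M_DEVBUF);
 *	}
 *
 * M_WAITOK permits the slow active-queue flush in vm_contig_pg_alloc();
 * M_ZERO is honored by vm_contig_pg_kmap().
 */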