1 /* $OpenBSD: uvm_km.c,v 1.155 2024/11/01 20:26:18 mpi Exp $ */
2 /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
3
4 /*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * Copyright (c) 1991, 1993, The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * The Mach Operating System project at Carnegie-Mellon University.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
38 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
39 *
40 *
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 */
64
65 /*
66 * uvm_km.c: handle kernel memory allocation and management
67 */
68
69 /*
70 * overview of kernel memory management:
71 *
72 * the kernel virtual address space is mapped by "kernel_map." kernel_map
73 * starts at a machine-dependent address and is VM_KERNEL_SPACE_SIZE bytes
74 * large.
75 *
76 * the kernel_map has several "submaps." submaps can only appear in
77 * the kernel_map (user processes can't use them). submaps "take over"
78 * the management of a sub-range of the kernel's address space. submaps
79 * are typically allocated at boot time and are never released. kernel
80 * virtual address space that is mapped by a submap is locked by the
81 * submap's lock -- not the kernel_map's lock.
82 *
83 * thus, the useful feature of submaps is that they allow us to break
84 * up the locking and protection of the kernel address space into smaller
85 * chunks.
86 *
87 * The VM system has several standard kernel submaps:
88 * kmem_map: Contains only wired kernel memory for malloc(9).
89 * Note: All access to this map must be protected by splvm as
90 * calls to malloc(9) are allowed in interrupt handlers.
91 * exec_map: Memory to hold arguments to execve(2) is allocated from
92 * this map.
93 * XXX: This is primarily used to artificially limit the number
94 * of concurrent processes doing an exec.
95 * phys_map: Buffers for vmapbuf (physio) are allocated from this map.
96 *
97 * the kernel allocates its private memory out of special uvm_objects whose
98 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
99 * are "special" and never die). all kernel objects should be thought of
100 * as large, fixed-size, sparsely populated uvm_objects. the size of each
101 * kernel object is equal to the size of the kernel virtual address space
102 * (i.e. VM_KERNEL_SPACE_SIZE).
103 *
104 * most kernel private memory lives in kernel_object. the only exception
105 * to this is for memory that belongs to submaps that must be protected
106 * by splvm(). each of these submaps manages its own pages.
107 *
108 * note that just because a kernel object spans the entire kernel virtual
109 * address space doesn't mean that it has to be mapped into the entire space.
110 * large chunks of a kernel object's space go unused either because
111 * that area of kernel VM is unmapped, or there is some other type of
112 * object mapped into that range (e.g. a vnode). for a submap's kernel
113 * object, the only part of the object that can ever be populated is the
114 * range of offsets that is managed by the submap.
115 *
116 * note that the "offset" in a kernel object is always the kernel virtual
117 * address minus the vm_map_min(kernel_map).
118 * example:
119 * suppose kernel_map starts at 0xf8000000 and the kernel does a
120 * km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok) [allocate 1 wired
121 * down page in the kernel map]. if km_alloc() returns virtual address
122 * 0xf8235000, then that means that the page at offset 0x235000 in
123 * kernel_object is mapped at 0xf8235000.
124 *
125 * kernel objects have one other special property: when the kernel virtual
126 * memory mapping them is unmapped, the backing memory in the object is
127 * freed right away. this is done with the uvm_km_pgremove() function.
128 * this has to be done because there is no backing store for kernel pages
129 * and no need to save them after they are no longer referenced.
130 */
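
/*
 * a minimal sketch of the usual consumer-side pattern built on the code
 * below: wired, zero-filled kernel memory obtained with km_alloc(9) and
 * released with km_free(9), using the constant mode structures defined at
 * the end of this file.  "len" and "buf" are illustrative names only.
 *
 *	void *buf;
 *
 *	buf = km_alloc(round_page(len), &kv_any, &kp_zero, &kd_waitok);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	km_free(buf, round_page(len), &kv_any, &kp_zero);
 */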
131
132 #include <sys/param.h>
133 #include <sys/systm.h>
134 #include <sys/proc.h>
135 #include <sys/kthread.h>
136 #include <uvm/uvm.h>
137
138 /*
139 * global data structures
140 */
141
142 struct vm_map *kernel_map = NULL;
143
144 /* Unconstrained range. */
145 struct uvm_constraint_range no_constraint = { 0x0, (paddr_t)-1 };
146
147 /*
148 * local data structures
149 */
150 static struct vm_map kernel_map_store;
151
152 /*
153 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
154 * KVM already allocated for text, data, bss, and static data structures).
155 *
156 * => KVM is defined by [base .. base + VM_KERNEL_SPACE_SIZE].
157 * we assume that [base -> start] has already been allocated and that
158 * "end" is the end of the kernel image span.
159 */
160 void
161 uvm_km_init(vaddr_t base, vaddr_t start, vaddr_t end)
162 {
163 /* kernel_object: for pageable anonymous kernel memory */
164 uao_init();
165 uvm.kernel_object = uao_create(VM_KERNEL_SPACE_SIZE, UAO_FLAG_KERNOBJ);
166
167 /*
168 * init the map and reserve already allocated kernel space
169 * before installing.
170 */
171
172 uvm_map_setup(&kernel_map_store, pmap_kernel(), base, end,
173 #ifdef KVA_GUARDPAGES
174 VM_MAP_PAGEABLE | VM_MAP_GUARDPAGES
175 #else
176 VM_MAP_PAGEABLE
177 #endif
178 );
179 if (base != start && uvm_map(&kernel_map_store, &base, start - base,
180 NULL, UVM_UNKNOWN_OFFSET, 0,
181 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
182 MAP_INHERIT_NONE, MADV_RANDOM, UVM_FLAG_FIXED)) != 0)
183 panic("uvm_km_init: could not reserve space for kernel");
184
185 kernel_map = &kernel_map_store;
186
187 #ifndef __HAVE_PMAP_DIRECT
188 /* allow km_alloc calls before uvm_km_thread starts */
189 mtx_init(&uvm_km_pages.mtx, IPL_VM);
190 #endif
191 }
192
193 /*
194 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap
195 * is allocated all references to that area of VM must go through it. this
196 * allows the locking of VAs in kernel_map to be broken up into regions.
197 *
198 * => if `fixed' is true, *min specifies where the region described
199 * by the submap must start
200 * => if submap is non-NULL we use that as the submap, otherwise we
201 * allocate a new map
202 */
203 struct vm_map *
204 uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
205 int flags, boolean_t fixed, struct vm_map *submap)
206 {
207 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
208
209 size = round_page(size); /* round up to pagesize */
210
211 /* first allocate a blank spot in the parent map */
212 if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
213 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
214 MAP_INHERIT_NONE, MADV_RANDOM, mapflags)) != 0) {
215 panic("uvm_km_suballoc: unable to allocate space in parent map");
216 }
217
218 /* set VM bounds (min is filled in by uvm_map) */
219 *max = *min + size;
220
221 /* add references to pmap and create or init the submap */
222 pmap_reference(vm_map_pmap(map));
223 if (submap == NULL) {
224 submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
225 if (submap == NULL)
226 panic("uvm_km_suballoc: unable to create submap");
227 } else {
228 uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags);
229 }
230
231 /*
232 * now let uvm_map_submap plug it in...
233 */
234 if (uvm_map_submap(map, *min, *max, submap) != 0)
235 panic("uvm_km_suballoc: submap allocation failed");
236
237 return(submap);
238 }
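
/*
 * a sketch (illustrative names only) of how boot-time code typically
 * carves a submap out of kernel_map with the function above:
 *
 *	vaddr_t kmem_base, kmem_limit;
 *	static struct vm_map kmem_map_store;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmem_base, &kmem_limit,
 *	    (vsize_t)nkmempages << PAGE_SHIFT, VM_MAP_INTRSAFE, FALSE,
 *	    &kmem_map_store);
 *
 * afterwards all VA management for [kmem_base, kmem_limit) goes through
 * kmem_map and is serialized by kmem_map's lock rather than kernel_map's.
 */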
239
240 /*
241 * uvm_km_pgremove: remove pages from a kernel uvm_object.
242 *
243 * => when you unmap a part of anonymous kernel memory you want to toss
244 * the pages right away. (this gets called from uvm_unmap_...).
245 */
246 void
247 uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva)
248 {
249 const voff_t start = startva - vm_map_min(kernel_map);
250 const voff_t end = endva - vm_map_min(kernel_map);
251 struct vm_page *pp;
252 voff_t curoff;
253 int slot;
254 int swpgonlydelta = 0;
255
256 KASSERT(UVM_OBJ_IS_AOBJ(uobj));
257 KASSERT(rw_write_held(uobj->vmobjlock));
258
259 pmap_remove(pmap_kernel(), startva, endva);
260 for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
261 pp = uvm_pagelookup(uobj, curoff);
262 if (pp && pp->pg_flags & PG_BUSY) {
263 uvm_pagewait(pp, uobj->vmobjlock, "km_pgrm");
264 rw_enter(uobj->vmobjlock, RW_WRITE);
265 curoff -= PAGE_SIZE; /* loop back to us */
266 continue;
267 }
268
269 /* free the swap slot, then the page */
270 slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT);
271
272 if (pp != NULL) {
273 uvm_lock_pageq();
274 uvm_pagefree(pp);
275 uvm_unlock_pageq();
276 } else if (slot != 0) {
277 swpgonlydelta++;
278 }
279 }
280
281 if (swpgonlydelta > 0) {
282 KASSERT(uvmexp.swpgonly >= swpgonlydelta);
283 atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
284 }
285 }
286
287
288 /*
289 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
290 * objects
291 *
292 * => when you unmap a part of anonymous kernel memory you want to toss
293 * the pages right away. (this gets called from uvm_unmap_...).
294 * => none of the pages will ever be busy, and none of them will ever
295 * be on the active or inactive queues (because these objects are
296 * never allowed to "page").
297 */
298 void
299 uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
300 {
301 struct vm_page *pg;
302 vaddr_t va;
303 paddr_t pa;
304
305 for (va = start; va < end; va += PAGE_SIZE) {
306 if (!pmap_extract(pmap_kernel(), va, &pa))
307 continue;
308 pg = PHYS_TO_VM_PAGE(pa);
309 if (pg == NULL)
310 panic("uvm_km_pgremove_intrsafe: no page");
311 uvm_pagefree(pg);
312 }
313 pmap_kremove(start, end - start);
314 }
315
316 /*
317 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
318 *
319 * => we map wired memory into the specified map using the obj passed in
320 * => NOTE: we can fail (return 0) even if we can wait if there is not enough
321 * free VM space in the map... caller should be prepared to handle
322 * this case.
323 * => we return KVA of memory allocated
324 * => flags: NOWAIT - fail rather than sleep for memory, VALLOC - just
325 * allocate VA, TRYLOCK - fail if we can't lock the map
326 * => low, high, alignment, boundary, nsegs are the corresponding parameters
327 * to uvm_pglistalloc
328 * => flags: ZERO - corresponds to the uvm_pglistalloc ZERO flag
329 */
330 vaddr_t
331 uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
332 vsize_t valign, int flags, paddr_t low, paddr_t high, paddr_t alignment,
333 paddr_t boundary, int nsegs)
334 {
335 vaddr_t kva, loopva;
336 voff_t offset;
337 struct vm_page *pg;
338 struct pglist pgl;
339 int pla_flags;
340
341 KASSERT(vm_map_pmap(map) == pmap_kernel());
342 /* UVM_KMF_VALLOC => !UVM_KMF_ZERO */
343 KASSERT(!(flags & UVM_KMF_VALLOC) ||
344 !(flags & UVM_KMF_ZERO));
345
346 /* setup for call */
347 size = round_page(size);
348 kva = vm_map_min(map); /* hint */
349 if (nsegs == 0)
350 nsegs = atop(size);
351
352 /* allocate some virtual space */
353 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
354 valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
355 MAP_INHERIT_NONE, MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
356 return 0;
357 }
358
359 /* if all we wanted was VA, return now */
360 if (flags & UVM_KMF_VALLOC) {
361 return kva;
362 }
363
364 /* recover object offset from virtual address */
365 if (obj != NULL)
366 offset = kva - vm_map_min(kernel_map);
367 else
368 offset = 0;
369
370 /*
371 * now allocate and map in the memory... note that we are the only ones
372 * who should ever get a handle on this area of VM.
373 */
374 TAILQ_INIT(&pgl);
375 pla_flags = 0;
376 KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
377 if ((flags & UVM_KMF_NOWAIT) ||
378 ((flags & UVM_KMF_CANFAIL) &&
379 uvmexp.swpages - uvmexp.swpgonly <= atop(size)))
380 pla_flags |= UVM_PLA_NOWAIT;
381 else
382 pla_flags |= UVM_PLA_WAITOK;
383 if (flags & UVM_KMF_ZERO)
384 pla_flags |= UVM_PLA_ZERO;
385 if (uvm_pglistalloc(size, low, high, alignment, boundary, &pgl, nsegs,
386 pla_flags) != 0) {
387 /* Failed. */
388 uvm_unmap(map, kva, kva + size);
389 return (0);
390 }
391
392 if (obj != NULL)
393 rw_enter(obj->vmobjlock, RW_WRITE);
394
395 loopva = kva;
396 while (loopva != kva + size) {
397 pg = TAILQ_FIRST(&pgl);
398 TAILQ_REMOVE(&pgl, pg, pageq);
399 uvm_pagealloc_pg(pg, obj, offset, NULL);
400 atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
401 UVM_PAGE_OWN(pg, NULL);
402
403 /*
404 * map it in: note that we call pmap_enter with the map and
405 * object unlocked in case we are kmem_map.
406 */
407 if (obj == NULL) {
408 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
409 PROT_READ | PROT_WRITE);
410 } else {
411 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
412 PROT_READ | PROT_WRITE,
413 PROT_READ | PROT_WRITE | PMAP_WIRED);
414 }
415 loopva += PAGE_SIZE;
416 offset += PAGE_SIZE;
417 }
418 KASSERT(TAILQ_EMPTY(&pgl));
419 pmap_update(pmap_kernel());
420
421 if (obj != NULL)
422 rw_exit(obj->vmobjlock);
423
424 return kva;
425 }
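
/*
 * a sketch of a caller with no physical-address constraints: wired,
 * zeroed pages mapped into kmem_map, failing instead of sleeping.  the
 * trailing "0, (paddr_t)-1, 0, 0, 0" means any physical address, no
 * extra alignment, no boundary, and nsegs == 0 (which defaults to one
 * segment per page).  "size" is an illustrative name.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_kmemalloc_pla(kmem_map, NULL, round_page(size), 0,
 *	    UVM_KMF_NOWAIT | UVM_KMF_ZERO, 0, (paddr_t)-1, 0, 0, 0);
 *	if (va == 0)
 *		...handle the failure...
 */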
426
427 /*
428 * uvm_km_free: free an area of kernel memory
429 */
430 void
431 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
432 {
433 uvm_unmap(map, trunc_page(addr), round_page(addr+size));
434 }
435
436 #if defined(__HAVE_PMAP_DIRECT)
437 /*
438 * uvm_km_page allocator, __HAVE_PMAP_DIRECT arch
439 * On architectures with machine memory direct mapped into a portion
440 * of KVM, we have very little work to do. Just get a physical page,
441 * and find and return its VA.
442 */
443 void
444 uvm_km_page_init(void)
445 {
446 /* nothing */
447 }
448
449 void
450 uvm_km_page_lateinit(void)
451 {
452 /* nothing */
453 }
454
455 #else
456 /*
457 * uvm_km_page allocator, non __HAVE_PMAP_DIRECT archs
458 * This is a special allocator that uses a reserve of free pages
459 * to fulfill requests. It is fast and interrupt safe, but can only
460 * return page sized regions. Its primary use is as a backend for pool.
461 *
462 * The memory returned is allocated from the larger kernel_map, sparing
463 * pressure on the small interrupt-safe kmem_map. It is wired, but
464 * not zero filled.
465 */
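
/*
 * consumers reach this reserve through km_alloc(9) with kv_page; a sketch
 * (not the exact pool(9) backend code) of taking and returning one page:
 *
 *	void *v = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty, &kd_nowait);
 *	...
 *	km_free(v, PAGE_SIZE, &kv_page, &kp_dirty);
 *
 * on __HAVE_PMAP_DIRECT architectures the same calls are satisfied from
 * the direct map instead of this reserve.
 */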
466
467 struct uvm_km_pages uvm_km_pages;
468
469 void uvm_km_createthread(void *);
470 void uvm_km_thread(void *);
471 struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *);
472
473 /*
474 * Allocate the initial reserve, and create the thread which will
475 * keep the reserve full. For bootstrapping, we allocate more than
476 * the lowat amount, because it may be a while before the thread is
477 * running.
478 */
479 void
480 uvm_km_page_init(void)
481 {
482 int lowat_min;
483 int i;
484 int len, bulk;
485 vaddr_t addr;
486
487 if (!uvm_km_pages.lowat) {
488 /* based on physmem, calculate a good value here */
489 uvm_km_pages.lowat = physmem / 256;
490 lowat_min = physmem < atop(16 * 1024 * 1024) ? 32 : 128;
491 if (uvm_km_pages.lowat < lowat_min)
492 uvm_km_pages.lowat = lowat_min;
493 }
494 if (uvm_km_pages.lowat > UVM_KM_PAGES_LOWAT_MAX)
495 uvm_km_pages.lowat = UVM_KM_PAGES_LOWAT_MAX;
496 uvm_km_pages.hiwat = 4 * uvm_km_pages.lowat;
497 if (uvm_km_pages.hiwat > UVM_KM_PAGES_HIWAT_MAX)
498 uvm_km_pages.hiwat = UVM_KM_PAGES_HIWAT_MAX;
499
500 /* Allocate all pages in as few allocations as possible. */
501 len = 0;
502 bulk = uvm_km_pages.hiwat;
503 while (len < uvm_km_pages.hiwat && bulk > 0) {
504 bulk = MIN(bulk, uvm_km_pages.hiwat - len);
505 addr = vm_map_min(kernel_map);
506 if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT,
507 NULL, UVM_UNKNOWN_OFFSET, 0,
508 UVM_MAPFLAG(PROT_READ | PROT_WRITE,
509 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
510 MADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) {
511 bulk /= 2;
512 continue;
513 }
514
515 for (i = len; i < len + bulk; i++, addr += PAGE_SIZE)
516 uvm_km_pages.page[i] = addr;
517 len += bulk;
518 }
519
520 uvm_km_pages.free = len;
521 for (i = len; i < UVM_KM_PAGES_HIWAT_MAX; i++)
522 uvm_km_pages.page[i] = 0;
523
524 /* tone down if really high */
525 if (uvm_km_pages.lowat > 512)
526 uvm_km_pages.lowat = 512;
527 }
528
529 void
530 uvm_km_page_lateinit(void)
531 {
532 kthread_create_deferred(uvm_km_createthread, NULL);
533 }
534
535 void
536 uvm_km_createthread(void *arg)
537 {
538 kthread_create(uvm_km_thread, NULL, &uvm_km_pages.km_proc, "kmthread");
539 }
540
541 /*
542 * Endless loop. We grab pages in increments of 16 pages, then
543 * quickly swap them into the list.
544 */
545 void
546 uvm_km_thread(void *arg)
547 {
548 vaddr_t pg[16];
549 int i;
550 int allocmore = 0;
551 int flags;
552 struct uvm_km_free_page *fp = NULL;
553
554 KERNEL_UNLOCK();
555
556 for (;;) {
557 mtx_enter(&uvm_km_pages.mtx);
558 if (uvm_km_pages.free >= uvm_km_pages.lowat &&
559 uvm_km_pages.freelist == NULL) {
560 msleep_nsec(&uvm_km_pages.km_proc, &uvm_km_pages.mtx,
561 PVM, "kmalloc", INFSLP);
562 }
563 allocmore = uvm_km_pages.free < uvm_km_pages.lowat;
564 fp = uvm_km_pages.freelist;
565 uvm_km_pages.freelist = NULL;
566 uvm_km_pages.freelistlen = 0;
567 mtx_leave(&uvm_km_pages.mtx);
568
569 if (allocmore) {
570 /*
571 * If there was nothing on the freelist, then we
572 * must obtain at least one page to make progress.
573 * So, only use UVM_KMF_TRYLOCK for the first page
574 * if fp != NULL
575 */
576 flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
577 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
578 MADV_RANDOM, fp != NULL ? UVM_KMF_TRYLOCK : 0);
579 memset(pg, 0, sizeof(pg));
580 for (i = 0; i < nitems(pg); i++) {
581 pg[i] = vm_map_min(kernel_map);
582 if (uvm_map(kernel_map, &pg[i], PAGE_SIZE,
583 NULL, UVM_UNKNOWN_OFFSET, 0, flags) != 0) {
584 pg[i] = 0;
585 break;
586 }
587
588 /* made progress, so don't sleep for more */
589 flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
590 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE,
591 MADV_RANDOM, UVM_KMF_TRYLOCK);
592 }
593
594 mtx_enter(&uvm_km_pages.mtx);
595 for (i = 0; i < nitems(pg); i++) {
596 if (uvm_km_pages.free ==
597 nitems(uvm_km_pages.page))
598 break;
599 else if (pg[i] != 0)
600 uvm_km_pages.page[uvm_km_pages.free++]
601 = pg[i];
602 }
603 wakeup(&uvm_km_pages.free);
604 mtx_leave(&uvm_km_pages.mtx);
605
606 /* Cleanup left-over pages (if any). */
607 for (; i < nitems(pg); i++) {
608 if (pg[i] != 0) {
609 uvm_unmap(kernel_map,
610 pg[i], pg[i] + PAGE_SIZE);
611 }
612 }
613 }
614 while (fp) {
615 fp = uvm_km_doputpage(fp);
616 }
617 }
618 }
619
620 struct uvm_km_free_page *
621 uvm_km_doputpage(struct uvm_km_free_page *fp)
622 {
623 vaddr_t va = (vaddr_t)fp;
624 struct vm_page *pg;
625 int freeva = 1;
626 struct uvm_km_free_page *nextfp = fp->next;
627
628 pg = uvm_atopg(va);
629
630 pmap_kremove(va, PAGE_SIZE);
631 pmap_update(kernel_map->pmap);
632
633 mtx_enter(&uvm_km_pages.mtx);
634 if (uvm_km_pages.free < uvm_km_pages.hiwat) {
635 uvm_km_pages.page[uvm_km_pages.free++] = va;
636 freeva = 0;
637 }
638 mtx_leave(&uvm_km_pages.mtx);
639
640 if (freeva)
641 uvm_unmap(kernel_map, va, va + PAGE_SIZE);
642
643 uvm_pagefree(pg);
644 return (nextfp);
645 }
646 #endif /* !__HAVE_PMAP_DIRECT */
647
648 void *
649 km_alloc(size_t sz, const struct kmem_va_mode *kv,
650 const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd)
651 {
652 struct vm_map *map;
653 struct vm_page *pg;
654 struct pglist pgl;
655 int mapflags = 0;
656 vm_prot_t prot;
657 paddr_t pla_align;
658 int pla_flags;
659 int pla_maxseg;
660 vaddr_t va, sva = 0;
661
662 KASSERT(sz == round_page(sz));
663
664 TAILQ_INIT(&pgl);
665
666 if (kp->kp_nomem || kp->kp_pageable)
667 goto alloc_va;
668
669 pla_flags = kd->kd_waitok ? UVM_PLA_WAITOK : UVM_PLA_NOWAIT;
670 pla_flags |= UVM_PLA_TRYCONTIG;
671 if (kp->kp_zero)
672 pla_flags |= UVM_PLA_ZERO;
673
674 pla_align = kp->kp_align;
675 #ifdef __HAVE_PMAP_DIRECT
676 if (pla_align < kv->kv_align)
677 pla_align = kv->kv_align;
678 #endif
679 pla_maxseg = kp->kp_maxseg;
680 if (pla_maxseg == 0)
681 pla_maxseg = sz / PAGE_SIZE;
682
683 if (uvm_pglistalloc(sz, kp->kp_constraint->ucr_low,
684 kp->kp_constraint->ucr_high, pla_align, kp->kp_boundary,
685 &pgl, pla_maxseg, pla_flags)) {
686 return (NULL);
687 }
688
689 #ifdef __HAVE_PMAP_DIRECT
690 /*
691 * Only use direct mappings for single page or single segment
692 * allocations.
693 */
694 if (kv->kv_singlepage || kp->kp_maxseg == 1) {
695 TAILQ_FOREACH(pg, &pgl, pageq) {
696 va = pmap_map_direct(pg);
697 if (pg == TAILQ_FIRST(&pgl))
698 sva = va;
699 }
700 return ((void *)sva);
701 }
702 #endif
703 alloc_va:
704 prot = PROT_READ | PROT_WRITE;
705
706 if (kp->kp_pageable) {
707 KASSERT(kp->kp_object);
708 KASSERT(!kv->kv_singlepage);
709 } else {
710 KASSERT(kp->kp_object == NULL);
711 }
712
713 if (kv->kv_singlepage) {
714 KASSERT(sz == PAGE_SIZE);
715 #ifdef __HAVE_PMAP_DIRECT
716 panic("km_alloc: DIRECT single page");
717 #else
718 mtx_enter(&uvm_km_pages.mtx);
719 while (uvm_km_pages.free == 0) {
720 if (kd->kd_waitok == 0) {
721 mtx_leave(&uvm_km_pages.mtx);
722 uvm_pglistfree(&pgl);
723 return NULL;
724 }
725 msleep_nsec(&uvm_km_pages.free, &uvm_km_pages.mtx,
726 PVM, "getpage", INFSLP);
727 }
728 va = uvm_km_pages.page[--uvm_km_pages.free];
729 if (uvm_km_pages.free < uvm_km_pages.lowat &&
730 curproc != uvm_km_pages.km_proc) {
731 if (kd->kd_slowdown)
732 *kd->kd_slowdown = 1;
733 wakeup(&uvm_km_pages.km_proc);
734 }
735 mtx_leave(&uvm_km_pages.mtx);
736 #endif
737 } else {
738 struct uvm_object *uobj = NULL;
739
740 if (kd->kd_trylock)
741 mapflags |= UVM_KMF_TRYLOCK;
742
743 if (kp->kp_object)
744 uobj = *kp->kp_object;
745 try_map:
746 map = *kv->kv_map;
747 va = vm_map_min(map);
748 if (uvm_map(map, &va, sz, uobj, kd->kd_prefer,
749 kv->kv_align, UVM_MAPFLAG(prot, prot, MAP_INHERIT_NONE,
750 MADV_RANDOM, mapflags))) {
751 if (kv->kv_wait && kd->kd_waitok) {
752 tsleep_nsec(map, PVM, "km_allocva", INFSLP);
753 goto try_map;
754 }
755 uvm_pglistfree(&pgl);
756 return (NULL);
757 }
758 }
759 sva = va;
760 TAILQ_FOREACH(pg, &pgl, pageq) {
761 if (kp->kp_pageable)
762 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg),
763 prot, prot | PMAP_WIRED);
764 else
765 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot);
766 va += PAGE_SIZE;
767 }
768 pmap_update(pmap_kernel());
769 return ((void *)sva);
770 }
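
/*
 * a sketch of a caller that cannot sleep for physical memory: kd_nowait
 * makes the page-list allocation fail instead of waiting, so the NULL
 * return must be handled (kd_trylock additionally refuses to wait for
 * the map lock).  "len" is an illustrative name.
 *
 *	void *v;
 *
 *	v = km_alloc(round_page(len), &kv_any, &kp_dma_zero, &kd_nowait);
 *	if (v == NULL)
 *		return (ENOMEM);
 *	...
 *	km_free(v, round_page(len), &kv_any, &kp_dma_zero);
 */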
771
772 void
773 km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
774 const struct kmem_pa_mode *kp)
775 {
776 vaddr_t sva, eva, va;
777 struct vm_page *pg;
778 struct pglist pgl;
779
780 sva = (vaddr_t)v;
781 eva = sva + sz;
782
783 if (kp->kp_nomem)
784 goto free_va;
785
786 #ifdef __HAVE_PMAP_DIRECT
787 if (kv->kv_singlepage || kp->kp_maxseg == 1) {
788 TAILQ_INIT(&pgl);
789 for (va = sva; va < eva; va += PAGE_SIZE) {
790 pg = pmap_unmap_direct(va);
791 TAILQ_INSERT_TAIL(&pgl, pg, pageq);
792 }
793 uvm_pglistfree(&pgl);
794 return;
795 }
796 #else
797 if (kv->kv_singlepage) {
798 struct uvm_km_free_page *fp = v;
799
800 mtx_enter(&uvm_km_pages.mtx);
801 fp->next = uvm_km_pages.freelist;
802 uvm_km_pages.freelist = fp;
803 if (uvm_km_pages.freelistlen++ > 16)
804 wakeup(&uvm_km_pages.km_proc);
805 mtx_leave(&uvm_km_pages.mtx);
806 return;
807 }
808 #endif
809
810 if (kp->kp_pageable) {
811 pmap_remove(pmap_kernel(), sva, eva);
812 pmap_update(pmap_kernel());
813 } else {
814 TAILQ_INIT(&pgl);
815 for (va = sva; va < eva; va += PAGE_SIZE) {
816 paddr_t pa;
817
818 if (!pmap_extract(pmap_kernel(), va, &pa))
819 continue;
820
821 pg = PHYS_TO_VM_PAGE(pa);
822 if (pg == NULL) {
823 panic("km_free: unmanaged page 0x%lx", pa);
824 }
825 TAILQ_INSERT_TAIL(&pgl, pg, pageq);
826 }
827 pmap_kremove(sva, sz);
828 pmap_update(pmap_kernel());
829 uvm_pglistfree(&pgl);
830 }
831 free_va:
832 uvm_unmap(*kv->kv_map, sva, eva);
833 if (kv->kv_wait)
834 wakeup(*kv->kv_map);
835 }
836
837 const struct kmem_va_mode kv_any = {
838 .kv_map = &kernel_map,
839 };
840
841 const struct kmem_va_mode kv_intrsafe = {
842 .kv_map = &kmem_map,
843 };
844
845 const struct kmem_va_mode kv_page = {
846 .kv_singlepage = 1
847 };
848
849 const struct kmem_pa_mode kp_dirty = {
850 .kp_constraint = &no_constraint
851 };
852
853 const struct kmem_pa_mode kp_dma = {
854 .kp_constraint = &dma_constraint
855 };
856
857 const struct kmem_pa_mode kp_dma_contig = {
858 .kp_constraint = &dma_constraint,
859 .kp_maxseg = 1
860 };
861
862 const struct kmem_pa_mode kp_dma_zero = {
863 .kp_constraint = &dma_constraint,
864 .kp_zero = 1
865 };
866
867 const struct kmem_pa_mode kp_zero = {
868 .kp_constraint = &no_constraint,
869 .kp_zero = 1
870 };
871
872 const struct kmem_pa_mode kp_pageable = {
873 .kp_object = &uvm.kernel_object,
874 .kp_pageable = 1
875 /* XXX - kp_nomem, maybe, but we'll need to fix km_free. */
876 };
877
878 const struct kmem_pa_mode kp_none = {
879 .kp_nomem = 1
880 };
881
882 const struct kmem_dyn_mode kd_waitok = {
883 .kd_waitok = 1,
884 .kd_prefer = UVM_UNKNOWN_OFFSET
885 };
886
887 const struct kmem_dyn_mode kd_nowait = {
888 .kd_prefer = UVM_UNKNOWN_OFFSET
889 };
890
891 const struct kmem_dyn_mode kd_trylock = {
892 .kd_trylock = 1,
893 .kd_prefer = UVM_UNKNOWN_OFFSET
894 };
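
/*
 * the constant modes above cover the common combinations; e.g. a sketch
 * of allocating a physically contiguous, DMA-reachable buffer:
 *
 *	p = km_alloc(round_page(sz), &kv_any, &kp_dma_contig, &kd_waitok);
 *
 * a caller needing a combination not provided here (say, contiguous and
 * zeroed) can build its own kmem_pa_mode, e.g. kp_constraint =
 * &dma_constraint with kp_maxseg = 1 and kp_zero = 1.
 */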
895