/*	$NetBSD: uvm_pgflcache.c,v 1.6 2020/10/18 18:31:31 chs Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pgflcache.c: page freelist cache.
 *
 * This implements a tiny per-CPU cache of pages that sits between the main
 * page allocator and the freelists.  By allocating and freeing pages in
 * batch, it reduces freelist contention by an order of magnitude.
 *
 * The cache can be paused & resumed at runtime so that UVM_HOTPLUG,
 * uvm_pglistalloc() and uvm_page_redim() can have a consistent view of the
 * world.  On systems with one CPU per physical package (e.g. a
 * uniprocessor) the cache is not enabled.
 */
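
/*
 * The pause/resume pair below is what gives other parts of UVM that
 * consistent view.  A sketch of the intended usage pattern (the real call
 * sites are uvm_pglistalloc() and friends, elsewhere in UVM):
 *
 *	uvm_pgflcache_pause();	  <- spill all per-CPU caches, disable them
 *	...inspect or rearrange the global freelists...
 *	uvm_pgflcache_resume();	  <- last resumer re-enables caching
 */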

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pgflcache.c,v 1.6 2020/10/18 18:31:31 chs Exp $");

#include "opt_uvm.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_pgflcache.h>

/* There is no point doing any of this on a uniprocessor. */
#ifdef MULTIPROCESSOR

/*
 * MAXPGS - maximum pages per color, per bucket.
 * FILLPGS - number of pages to allocate at once, per color, per bucket.
 *
 * Why the chosen values:
 *
 * (1) In 2019, an average Intel system has 4kB pages and 8x L2 cache
 * colors.  We make the assumption that most of the time allocation activity
 * will be centered around one UVM freelist, so most of the time there will
 * be no more than 224kB worth of cached pages per CPU.  That's tiny, but
 * enough to hugely reduce contention on the freelist locks, and give us a
 * small pool of pages which if we're very lucky may have some L1/L2 cache
 * locality, and do so without subtracting too much from the L2/L3 cache
 * benefits of having per-package free lists in the page allocator.
 *
 * (2) With the chosen values on _LP64, the data structure for each color
 * takes up a single cache line (64 bytes), so the overhead stays very low
 * even in the "miss" case.
 *
 * (3) We don't want to cause too much pressure by hiding away memory that
 * could otherwise be put to good use.
 */
#define	MAXPGS		7
#define	FILLPGS		6

/* Variable size, according to # colors. */
struct pgflcache {
	struct pccolor {
		intptr_t	count;
		struct vm_page	*pages[MAXPGS];
	} color[1];
};
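
/*
 * Worked sizing example, using the 2019 assumptions from the comment
 * above: 8 colors * MAXPGS (7) pages * 4kB = 224kB cached per freelist,
 * per CPU.  On _LP64, sizeof(struct pccolor) is 8 (count) + 7 * 8
 * (pages) = 64 bytes, i.e. exactly one cache line.
 */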

static kmutex_t uvm_pgflcache_lock;
static int uvm_pgflcache_sem;

/*
 * uvm_pgflcache_fill: fill specified freelist/color from global list
 *
 * => must be called at IPL_VM
 * => must be called with given bucket lock held
 * => must only fill from the correct bucket for this CPU
 */

void
uvm_pgflcache_fill(struct uvm_cpu *ucpu, int fl, int b, int c)
{
	struct pgflbucket *pgb;
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct pgflist *head;
	struct vm_page *pg;
	int count;

	KASSERT(mutex_owned(&uvm_freelist_locks[b].lock));
	KASSERT(ucpu->pgflbucket == b);

	/* If caching is off, then bail out. */
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return;
	}

	/* Fill only to the limit. */
	pcc = &pc->color[c];
	pgb = uvm.page_free[fl].pgfl_buckets[b];
	head = &pgb->pgb_colors[c];
	if (pcc->count >= FILLPGS) {
		return;
	}

	/* Pull pages from the bucket until it's empty, or we are full. */
	count = pcc->count;
	pg = LIST_FIRST(head);
	while (__predict_true(pg != NULL && count < FILLPGS)) {
		KASSERT(pg->flags & PG_FREE);
		KASSERT(uvm_page_get_bucket(pg) == b);
		pcc->pages[count++] = pg;
		pg = LIST_NEXT(pg, pageq.list);
	}

	/* Violate LIST abstraction to remove all pages at once. */
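	/*
	 * At this point pg is the first page we did NOT take (or NULL if
	 * the bucket drained).  Re-pointing lh_first and the back pointer
	 * detaches everything before it in O(1), instead of doing one
	 * LIST_REMOVE() per page.
	 */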
	head->lh_first = pg;
	if (__predict_true(pg != NULL)) {
		pg->pageq.list.le_prev = &head->lh_first;
	}
	pgb->pgb_nfree -= (count - pcc->count);
	CPU_COUNT(CPU_COUNT_FREEPAGES, -(count - pcc->count));
	pcc->count = count;
}

/*
 * uvm_pgflcache_spill: spill specified freelist/color to global list
 *
 * => must be called at IPL_VM
 * => mark __noinline so we don't pull it into uvm_pgflcache_free()
 */

static void __noinline
uvm_pgflcache_spill(struct uvm_cpu *ucpu, int fl, int c)
{
	struct pgflbucket *pgb;
	struct pgfreelist *pgfl;
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct pgflist *head;
	kmutex_t *lock;
	int b, adj;

	pc = ucpu->pgflcache[fl];
	pcc = &pc->color[c];
	pgfl = &uvm.page_free[fl];
	b = ucpu->pgflbucket;
	pgb = pgfl->pgfl_buckets[b];
	head = &pgb->pgb_colors[c];
	lock = &uvm_freelist_locks[b].lock;

	mutex_spin_enter(lock);
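	/*
	 * Remember how many pages are being drained (adj), so the bucket's
	 * free count and the per-CPU counter can each be adjusted once,
	 * after the loop.
	 */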
	for (adj = pcc->count; pcc->count != 0;) {
		pcc->count--;
		KASSERT(pcc->pages[pcc->count] != NULL);
		KASSERT(pcc->pages[pcc->count]->flags & PG_FREE);
		LIST_INSERT_HEAD(head, pcc->pages[pcc->count], pageq.list);
	}
	pgb->pgb_nfree += adj;
	CPU_COUNT(CPU_COUNT_FREEPAGES, adj);
	mutex_spin_exit(lock);
}

/*
 * uvm_pgflcache_alloc: try to allocate a cached page.
 *
 * => must be called at IPL_VM
 * => allocate only from the given freelist and given page color
 */

struct vm_page *
uvm_pgflcache_alloc(struct uvm_cpu *ucpu, int fl, int c)
{
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct vm_page *pg;

	/* If caching is off, then bail out. */
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return NULL;
	}

	/* Very simple: if we have a page then return it. */
	pcc = &pc->color[c];
	if (__predict_false(pcc->count == 0)) {
		return NULL;
	}
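	/*
	 * Take from the top of the stack: LIFO order hands out the most
	 * recently freed page first, the page most likely to still be
	 * warm in this CPU's cache.
	 */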
	pg = pcc->pages[--(pcc->count)];
	KASSERT(pg != NULL);
	KASSERT(pg->flags == PG_FREE);
	KASSERT(uvm_page_get_freelist(pg) == fl);
	KASSERT(uvm_page_get_bucket(pg) == ucpu->pgflbucket);
	pg->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
	return pg;
}

/*
 * uvm_pgflcache_free: cache a page, if possible.
 *
 * => must be called at IPL_VM
 * => must only send pages for the correct bucket for this CPU
 */

bool
uvm_pgflcache_free(struct uvm_cpu *ucpu, struct vm_page *pg)
{
	struct pgflcache *pc;
	struct pccolor *pcc;
	int fl, c;

	KASSERT(uvm_page_get_bucket(pg) == ucpu->pgflbucket);

	/* If caching is off, then bail out. */
	fl = uvm_page_get_freelist(pg);
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return false;
	}

	/* If the array is full, spill it first, then add the page. */
	c = VM_PGCOLOR(pg);
	pcc = &pc->color[c];
	KASSERT((pg->flags & PG_FREE) == 0);
	if (__predict_false(pcc->count == MAXPGS)) {
		uvm_pgflcache_spill(ucpu, fl, c);
	}
	pg->flags = PG_FREE;
	pcc->pages[pcc->count] = pg;
	pcc->count++;
	return true;
}

/*
 * uvm_pgflcache_init_cpu: allocate and initialize per-CPU data structures
 * for the free page cache.  Don't set anything in motion - that's taken
 * care of by uvm_pgflcache_resume().
 */

static void
uvm_pgflcache_init_cpu(struct cpu_info *ci)
{
	struct uvm_cpu *ucpu;
	size_t sz;

	ucpu = ci->ci_data.cpu_uvm;
	KASSERT(ucpu->pgflcachemem == NULL);
	KASSERT(ucpu->pgflcache[0] == NULL);

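	/*
	 * One variable-sized pgflcache per freelist, plus coherency_unit - 1
	 * bytes of slop so that uvm_pgflcache_resume() can round the base
	 * address up to a cache line boundary.
	 */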
	sz = offsetof(struct pgflcache, color[uvmexp.ncolors]);
	ucpu->pgflcachememsz =
	    (roundup2(sz * VM_NFREELIST, coherency_unit) + coherency_unit - 1);
	ucpu->pgflcachemem = kmem_zalloc(ucpu->pgflcachememsz, KM_SLEEP);
}

/*
 * uvm_pgflcache_fini_cpu: dump all cached pages back to global free list
 * and shut down caching on the CPU.  Called on each CPU in the system via
 * xcall.
 */

static void
uvm_pgflcache_fini_cpu(void *arg1 __unused, void *arg2 __unused)
{
	struct uvm_cpu *ucpu;
	int fl, color, s;

	ucpu = curcpu()->ci_data.cpu_uvm;
	for (fl = 0; fl < VM_NFREELIST; fl++) {
		s = splvm();
		for (color = 0; color < uvmexp.ncolors; color++) {
			uvm_pgflcache_spill(ucpu, fl, color);
		}
		ucpu->pgflcache[fl] = NULL;
		splx(s);
	}
}

/*
 * uvm_pgflcache_pause: pause operation of the caches
 */

void
uvm_pgflcache_pause(void)
{
	uint64_t where;

	/* First one in starts draining.  Everyone else waits. */
	mutex_enter(&uvm_pgflcache_lock);
	if (uvm_pgflcache_sem++ == 0) {
		where = xc_broadcast(XC_HIGHPRI, uvm_pgflcache_fini_cpu,
		    (void *)1, NULL);
		xc_wait(where);
	}
	mutex_exit(&uvm_pgflcache_lock);
}

/*
 * uvm_pgflcache_resume: resume operation of the caches
 */

void
uvm_pgflcache_resume(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct uvm_cpu *ucpu;
	uintptr_t addr;
	size_t sz;
	int fl;

	/* Last guy out takes care of business. */
	mutex_enter(&uvm_pgflcache_lock);
	KASSERT(uvm_pgflcache_sem > 0);
	if (uvm_pgflcache_sem-- > 1) {
		mutex_exit(&uvm_pgflcache_lock);
		return;
	}

	/*
	 * Make sure dependent data structure updates are remotely visible.
	 * Essentially this functions as a global memory barrier.
	 */
	xc_barrier(XC_HIGHPRI);

	/*
	 * Then set all of the pointers in place on each CPU.  As soon as
	 * each pointer is set, caching is operational in that dimension.
	 */
	sz = offsetof(struct pgflcache, color[uvmexp.ncolors]);
	for (CPU_INFO_FOREACH(cii, ci)) {
		ucpu = ci->ci_data.cpu_uvm;
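		/*
		 * Align the base to a cache line boundary; the slop for
		 * this was reserved in uvm_pgflcache_init_cpu().
		 */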
		addr = roundup2((uintptr_t)ucpu->pgflcachemem, coherency_unit);
		for (fl = 0; fl < VM_NFREELIST; fl++) {
			ucpu->pgflcache[fl] = (struct pgflcache *)addr;
			addr += sz;
		}
	}
	mutex_exit(&uvm_pgflcache_lock);
}

/*
 * uvm_pgflcache_start: start operation of the cache.
 *
 * => called once only, when init(8) is about to be started
 */

void
uvm_pgflcache_start(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(uvm_pgflcache_sem > 0);

	/*
	 * There's not much point doing this if every CPU has its own
	 * bucket (and that includes the uniprocessor case).
	 */
	if (ncpu == uvm.bucketcount) {
		return;
	}

	/* Create data structures for each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uvm_pgflcache_init_cpu(ci);
	}

	/* Kick it into action. */
	uvm_pgflcache_resume();
}

/*
 * uvm_pgflcache_init: set up data structures for the free page cache.
 */

void
uvm_pgflcache_init(void)
{

	uvm_pgflcache_sem = 1;
	mutex_init(&uvm_pgflcache_lock, MUTEX_DEFAULT, IPL_NONE);
}

#else /* MULTIPROCESSOR */

struct vm_page *
uvm_pgflcache_alloc(struct uvm_cpu *ucpu, int fl, int c)
{

	return NULL;
}

bool
uvm_pgflcache_free(struct uvm_cpu *ucpu, struct vm_page *pg)
{

	return false;
}

void
uvm_pgflcache_fill(struct uvm_cpu *ucpu, int fl, int b, int c)
{

}

void
uvm_pgflcache_pause(void)
{

}

void
uvm_pgflcache_resume(void)
{

}

void
uvm_pgflcache_start(void)
{

}

void
uvm_pgflcache_init(void)
{

}

#endif /* MULTIPROCESSOR */