/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * pmap invalidation support code.  Certain hardware requirements must
 * be dealt with when manipulating page table entries and page directory
 * entries within a pmap.  In particular, we cannot safely manipulate
 * page tables which are in active use by another cpu (even if it is
 * running in userland) for two reasons: First, TLB writebacks will
 * race against our own modifications and tests.  Second, even if we
 * were to use bus-locked instructions we could still corrupt the
 * target cpu's instruction pipeline due to Intel cpu errata.
 */
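
/*
 * A minimal sketch of the expected call sequence, based on the API
 * below.  The PTE store is a placeholder for whatever caller-specific
 * modification is being protected; `ptep' and `npte' are hypothetical:
 *
 *	pmap_inval_info info;
 *
 *	pmap_inval_init(&info);
 *	pmap_inval_interlock(&info, pmap, va);
 *	*ptep = npte;			  (caller-specific PTE update)
 *	pmap_inval_deinterlock(&info, pmap);
 *	pmap_inval_done(&info);
 *
 * Passing va == (vm_offset_t)-1 requests a full TLB invalidation
 * rather than a single-page invalidation.
 */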

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>

#ifdef SMP

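/*
 * cpusync callbacks, executed on each target cpu by the cpu
 * synchronization framework.  _cpu_invltlb() flushes the entire TLB;
 * _cpu_invl1pg() invalidates a single page, whose va is passed via
 * the cpusync data pointer (cs_data).
 */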
static void
_cpu_invltlb(void *dummy)
{
    cpu_invltlb();
}

static void
_cpu_invl1pg(void *data)
{
    cpu_invlpg(data);
}

#endif

/*
 * Initialize for an add or flush operation.  We enter a critical
 * section for the duration of the sequence.
 */
void
pmap_inval_init(pmap_inval_info_t info)
{
    info->pir_flags = 0;
    crit_enter_id("inval");
}

/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPUMASK_LOCK is used to interlock thread switch-ins, preventing
 * pm_active from changing underneath us while the interlock is held.
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
#ifdef SMP
    cpumask_t oactive;
    cpumask_t nactive;

    /*
     * Acquire CPUMASK_LOCK in pm_active.  If another cpu holds the
     * lock the cmpset fails; process pending IPIs while we spin so
     * we cannot deadlock against a cpu which is waiting on us.
     */
    for (;;) {
	oactive = pmap->pm_active & ~CPUMASK_LOCK;
	nactive = oactive | CPUMASK_LOCK;
	if (atomic_cmpset_int(&pmap->pm_active, oactive, nactive))
		break;
	crit_enter();
	lwkt_process_ipiq();
	crit_exit();
    }

    /*
     * Start a cpu synchronization against the cpus the pmap is active
     * on, or add newly active cpus to a synchronization already in
     * progress.
     */
    if ((info->pir_flags & PIRF_CPUSYNC) == 0) {
	info->pir_flags |= PIRF_CPUSYNC;
	info->pir_cpusync.cs_run_func = NULL;
	info->pir_cpusync.cs_fin1_func = NULL;
	info->pir_cpusync.cs_fin2_func = NULL;
	lwkt_cpusync_start(oactive, &info->pir_cpusync);
    } else if (pmap->pm_active & ~info->pir_cpusync.cs_mask) {
	lwkt_cpusync_add(oactive, &info->pir_cpusync);
    }
#else
    if (pmap->pm_active == 0)
	return;
#endif

    /*
     * The first single-page request can use invlpg, but a wildcard
     * va or any additional request upgrades to a full TLB
     * invalidation.
     */
    if ((info->pir_flags & (PIRF_INVLTLB|PIRF_INVL1PG)) == 0) {
	if (va == (vm_offset_t)-1) {
	    info->pir_flags |= PIRF_INVLTLB;
#ifdef SMP
	    info->pir_cpusync.cs_fin2_func = _cpu_invltlb;
#endif
	} else {
	    info->pir_flags |= PIRF_INVL1PG;
	    info->pir_cpusync.cs_data = (void *)va;
#ifdef SMP
	    info->pir_cpusync.cs_fin2_func = _cpu_invl1pg;
#endif
	}
    } else {
	info->pir_flags |= PIRF_INVLTLB;
#ifdef SMP
	info->pir_cpusync.cs_fin2_func = _cpu_invltlb;
#endif
    }
}

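/*
 * Release the CPUMASK_LOCK interlock acquired by pmap_inval_interlock(),
 * allowing thread switch-ins to update pm_active again.
 */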
void
pmap_inval_deinterlock(pmap_inval_info_t info, pmap_t pmap)
{
#ifdef SMP
	atomic_clear_int(&pmap->pm_active, CPUMASK_LOCK);
#endif
}

/*
 * Synchronize changes with the target cpus: finish the cpu
 * synchronization on SMP, or perform the invalidation locally on UP,
 * then reset the flags for the next sequence.
 */
void
pmap_inval_flush(pmap_inval_info_t info)
{
#ifdef SMP
    if (info->pir_flags & PIRF_CPUSYNC)
	lwkt_cpusync_finish(&info->pir_cpusync);
#else
    if (info->pir_flags & PIRF_INVLTLB)
	cpu_invltlb();
    else if (info->pir_flags & PIRF_INVL1PG)
	cpu_invlpg(info->pir_cpusync.cs_data);
#endif
    info->pir_flags = 0;
}

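/*
 * Finish an invalidation sequence: flush any pending synchronization
 * and exit the critical section entered by pmap_inval_init().
 */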
void
pmap_inval_done(pmap_inval_info_t info)
{
    pmap_inval_flush(info);
    crit_exit_id("inval");
}