/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_synci.c,v 1.3 2016/07/11 16:06:09 matt Exp $");

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#if defined(MULTIPROCESSOR)
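/*
 * pmap_tlb_synci_page_mask hashes a page's VA index down to a bit
 * position in the 32-bit per-TLB synci bitmap; pmap_tlb_synci_map_mask
 * is that bitmap with every usable bit set.
 */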
u_int	pmap_tlb_synci_page_mask;
u_int	pmap_tlb_synci_map_mask;

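/*
 * Process a deferred icache sync request.  Called from the AST path
 * just before a thread returns to userspace: claim the bitmap of page
 * indices accumulated for this CPU's TLB/cache and sync each of them,
 * or the whole icache if every bit is set.
 */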
void
pmap_tlb_syncicache_ast(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(kpreempt_disabled());

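	/*
	 * Atomically claim every pending page index.  Requests that
	 * arrive after this swap will find an empty bitmap and raise
	 * a fresh AST/IPI of their own.
	 */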
	uint32_t page_bitmap = atomic_swap_32(&ti->ti_synci_page_bitmap, 0);
#if 0
	printf("%s: need to sync %#x\n", __func__, page_bitmap);
#endif
	ti->ti_evcnt_synci_asts.ev_count++;
	/*
	 * If every bit is set in the bitmap, sync the entire icache.
	 */
	if (page_bitmap == pmap_tlb_synci_map_mask) {
		pmap_md_icache_sync_all();
		ti->ti_evcnt_synci_all.ev_count++;
		ti->ti_evcnt_synci_pages.ev_count += pmap_tlb_synci_page_mask+1;
		return;
	}

	/*
	 * Walk the bitmap, syncing the icache for each page index whose
	 * bit is set.
	 */
	for (vaddr_t va = 0;
	     page_bitmap != 0;
	     page_bitmap >>= 1, va += PAGE_SIZE) {
		if (page_bitmap & 1) {
			/*
			 * Each bit set represents a page index to be synced.
			 */
			pmap_md_icache_sync_range_index(va, PAGE_SIZE);
			ti->ti_evcnt_synci_pages.ev_count++;
		}
	}
}

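/*
 * Record that the page containing va needs its icache synced on the
 * CPUs in page_onproc.  The sync itself is deferred to the AST path:
 * here we only set the page's bit in each TLB's synci bitmap and kick
 * the CPUs that need to take an AST.
 */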
void
pmap_tlb_syncicache(vaddr_t va, const kcpuset_t *page_onproc)
{
	KASSERT(kpreempt_disabled());
	/*
	 * We don't sync the icache here but let the AST do it for us just
	 * before returning to userspace.  We do this because we don't
	 * really know on which CPU we will return to userspace, and if we
	 * synced the icache now it might not be the CPU we need it on.  In
	 * addition, other threads might sync the icache before we get to
	 * return to userland, in which case there would be no reason for
	 * us to do it.
	 *
	 * Each TLB/cache keeps a synci sequence number which gets advanced
	 * each time that TLB/cache performs a pmap_md_icache_sync_all.  When
	 * we return to userland, we check the pmap's corresponding synci
	 * sequence number for that TLB/cache.  If they match, it means that
	 * no one has yet synced the icache, so we must do it ourselves.  If
	 * they don't match, someone has already synced the icache for us.
	 *
	 * There is a small chance that the sequence numbers will wrap and
	 * become equal again, but that is a one-in-4-billion chance and
	 * will merely cause an extra sync of the icache.
	 */
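	/*
	 * Roughly, the check on return to userland described above is
	 * (a sketch only; the real test lives in the MD AST code, and
	 * the field names here are illustrative):
	 *
	 *	if (pm->pm_synci_gen == ti->ti_synci_gen)
	 *		pmap_md_icache_sync_all();	// not yet synced
	 */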
	struct cpu_info * const ci = curcpu();
	kcpuset_t *onproc;
	kcpuset_create(&onproc, true);
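	/*
	 * Hash the page's VA index down to a single bit in the 32-bit
	 * bitmap.  Distinct pages can share a bit; that at worst causes
	 * an extra, harmless sync of the aliased page index.
	 */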
	const uint32_t page_mask =
	    1L << ((va >> PGSHIFT) & pmap_tlb_synci_page_mask);
	for (size_t i = 0; i < pmap_ntlbs; i++) {
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		TLBINFO_LOCK(ti);
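		/*
		 * The AST handler on another CPU can clear the whole
		 * bitmap at any time via atomic_swap_32(), and we need
		 * the old value to tell a duplicate request from the
		 * first one, so set our bit with a compare-and-swap
		 * retry loop.
		 */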
		for (;;) {
			uint32_t old_page_bitmap = ti->ti_synci_page_bitmap;
			if (old_page_bitmap & page_mask) {
				ti->ti_evcnt_synci_duplicate.ev_count++;
				break;
			}

			uint32_t orig_page_bitmap = atomic_cas_32(
			    &ti->ti_synci_page_bitmap, old_page_bitmap,
			    old_page_bitmap | page_mask);

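			/*
			 * If the CAS succeeded and ours is the first bit
			 * set since the last AST, the CPUs sharing this
			 * TLB must be notified; otherwise a notification
			 * is already pending and this request simply
			 * piggybacks on it.
			 */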
			if (orig_page_bitmap == old_page_bitmap) {
				if (old_page_bitmap == 0) {
					kcpuset_merge(onproc, ti->ti_kcpuset);
				} else {
					ti->ti_evcnt_synci_deferred.ev_count++;
				}
				ti->ti_evcnt_synci_desired.ev_count++;
				break;
			}
		}
#if 0
		printf("%s: %s: %x to %x on cpus %#x\n", __func__,
		    ti->ti_name, page_mask, ti->ti_synci_page_bitmap,
		     onproc & page_onproc & ti->ti_cpu_mask);
#endif
		TLBINFO_UNLOCK(ti);
	}
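	/*
	 * Only CPUs that both share an affected TLB and actually have
	 * the page in use need to be notified.
	 */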
	kcpuset_intersect(onproc, page_onproc);
	if (__predict_false(!kcpuset_iszero(onproc))) {
		/*
		 * If this CPU needs to sync the page, tell the current
		 * lwp to sync the icache before it returns to userspace.
		 */
		if (kcpuset_isset(onproc, cpu_index(ci))) {
			if (ci->ci_flags & CPUF_USERPMAP) {
				curlwp->l_md.md_astpending = 1;	/* force call to ast() */
				ci->ci_evcnt_synci_onproc_rqst.ev_count++;
			} else {
				ci->ci_evcnt_synci_deferred_rqst.ev_count++;
			}
			kcpuset_clear(onproc, cpu_index(ci));
		}

		/*
		 * For each CPU that is affected, send an IPI telling
		 * that CPU that the current thread needs to sync its
		 * icache.  We might cause some spurious icache syncs,
		 * but that's not going to break anything.
		 */
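		/*
		 * kcpuset_ffs() returns a 1-based bit index (0 when the
		 * set is empty), so "n-- > 0" both terminates the loop
		 * and converts n to the 0-based cpu index expected by
		 * cpu_lookup().
		 */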
		for (cpuid_t n = kcpuset_ffs(onproc);
		     n-- > 0;
		     n = kcpuset_ffs(onproc)) {
			kcpuset_clear(onproc, n);
			cpu_send_ipi(cpu_lookup(n), IPI_SYNCICACHE);
		}
	}
	kcpuset_destroy(onproc);
}

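/*
 * IPI handler notification: another CPU has queued icache sync work
 * for this CPU's TLB/cache, so arrange for the current lwp to take an
 * AST and process the bitmap before it returns to userspace.
 */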
void
pmap_tlb_syncicache_wanted(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(cpu_intr_p());

	TLBINFO_LOCK(ti);

	/*
	 * We might have been notified because another CPU changed an exec
	 * page and now needs us to sync the icache, so tell the current
	 * lwp to do so the next time it returns to userland (which should
	 * be very soon).
	 */
	if (ti->ti_synci_page_bitmap && (ci->ci_flags & CPUF_USERPMAP)) {
		curlwp->l_md.md_astpending = 1;	/* force call to ast() */
		ci->ci_evcnt_synci_ipi_rqst.ev_count++;
	}

	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */