/*	$NetBSD: gdt.c,v 1.74 2023/07/16 19:55:43 riastradh Exp $	*/

/*
 * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John T. Kohl, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.74 2023/07/16 19:55:43 riastradh Exp $");

#include "opt_multiprocessor.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>
#include <machine/pmap_private.h>

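/*
 * Number of dynamic GDT slots that fit in a table of sz bytes, counting
 * from DYNSEL_START; NDYNSLOTS is that count for a maximum-size table.
 */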
#define NSLOTS(sz)	\
	(((sz) - DYNSEL_START) / sizeof(union descriptor))
#define NDYNSLOTS	NSLOTS(MAXGDTSIZ)

typedef struct {
	bool busy[NDYNSLOTS];
	size_t nslots;
} gdt_bitmap_t;

/* size of GDT in bytes */
#ifdef XENPV
const size_t gdt_size = FIRST_RESERVED_GDT_BYTE;
#else
const size_t gdt_size = MAXGDTSIZ;
#endif

/* bitmap of busy slots */
static gdt_bitmap_t gdt_bitmap;

#ifndef XENPV
static int ldt_count;	/* number of LDTs */
static int ldt_max = 1000;	/* max number of LDTs */
static void setgdt(int, const void *, size_t, int, int, int, int);
static int gdt_get_slot(void);
static void gdt_put_slot(int);
#endif
void gdt_init(void);

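/*
 * Write a GDT entry.  Natively this is a plain store; under Xen PV the
 * GDT is read-only to the guest, so the update goes through the
 * hypervisor using the entry's physical address.
 */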
void
update_descriptor(union descriptor *table, union descriptor *entry)
{
#ifndef XENPV
	*table = *entry;
#else
	paddr_t pa;
	pt_entry_t *ptp;

	ptp = kvtopte((vaddr_t)table);
	pa = (*ptp & PTE_4KFRAME) | ((vaddr_t)table & ~PTE_4KFRAME);
	if (HYPERVISOR_update_descriptor(pa, entry->raw[0], entry->raw[1]))
		panic("HYPERVISOR_update_descriptor failed\n");
#endif
}

#ifndef XENPV
/*
 * Called on a newly-allocated GDT slot, so no race between CPUs.
 */
static void
setgdt(int slot, const void *base, size_t limit, int type, int dpl, int def32,
    int gran)
{
	struct segment_descriptor *sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int idx;

	idx = IDXSEL(GDYNSEL(slot, SEL_KPL));
	sd = &gdtstore[idx].sd;
	setsegment(sd, base, limit, type, dpl, def32, gran);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_gdt != NULL)
			update_descriptor(&ci->ci_gdt[idx],
			    (union descriptor *)sd);
	}
}
#endif

/*
 * gdt_init()
 *
 *	Create a permanent Global Descriptor Table (GDT) for the
 *	primary CPU.  This replaces the second temporary GDT that was
 *	allocated in pmap_bootstrap with pmap_bootstrap_valloc and
 *	pmap_bootstrap_palloc -- which in turn replaced the initial
 *	temporary GDT allocated on the stack early at boot and
 *	initialized with initgdt.
 *
 *	1. Allocate permanent space for the primary CPU's GDT with
 *	   uvm_km(9).
 *
 *	2. Copy the temporary GDT's contents over.  See initgdt for the
 *	   original initialization; it was copied from the initial
 *	   temporary GDT to the second temporary GDT in init386.
 *
 *	3. Make sure the GCPU_SEL segment descriptor points to
 *	   &cpu_info_primary.
 *
 *	   XXX Is this necessary?  It appears to be redundant with
 *	   initgdt.
 *
 *	4. Load the permanent GDT address into the Global Descriptor
 *	   Table Register (GDTR) with LGDT (via gdt_init_cpu).
 */
void
gdt_init(void)
{
	union descriptor *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	/* Initialize the global values */
	memset(&gdt_bitmap.busy, 0, sizeof(gdt_bitmap.busy));
	gdt_bitmap.nslots = NSLOTS(gdt_size);

	old_gdt = gdtstore;

	/* Allocate gdt_size bytes of memory. */
	gdtstore = (union descriptor *)uvm_km_alloc(kernel_map, gdt_size, 0,
	    UVM_KMF_VAONLY);
	for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + gdt_size;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());

	/* Copy the initial bootstrap GDT into the new area. */
	memcpy(gdtstore, old_gdt, NGDT * sizeof(gdtstore[0]));
	ci->ci_gdt = gdtstore;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci,
	    sizeof(struct cpu_info) - 1, SDT_MEMRWA, SEL_KPL, 1, 0);

	gdt_init_cpu(ci);
}

/*
 * Allocate shadow GDT for a secondary CPU. It contains the same values as the
 * GDT present in cpu0 (gdtstore).
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, gdt_size,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + gdt_size;
	    va += PAGE_SIZE) {
		while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
		    == NULL) {
			uvm_wait("gdt_alloc_cpu");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());

	memcpy(ci->ci_gdt, gdtstore, gdt_size);

	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci,
	    sizeof(struct cpu_info) - 1, SDT_MEMRWA, SEL_KPL, 1, 0);
}

/*
 * Load appropriate GDT descriptor into the currently running CPU, which must
 * be ci.
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
#ifndef XENPV
	struct region_descriptor region;

	setregion(&region, ci->ci_gdt, gdt_size - 1);
	lgdt(&region);
#else
	size_t len = roundup(gdt_size, PAGE_SIZE);
	unsigned long frames[len >> PAGE_SHIFT];
	vaddr_t va;
	pt_entry_t *ptp;
	size_t f;

	for (va = (vaddr_t)ci->ci_gdt, f = 0;
	    va < (vaddr_t)ci->ci_gdt + gdt_size;
	    va += PAGE_SIZE, f++) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		frames[f] = *ptp >> PAGE_SHIFT;

		/*
		 * Our own
		 *	pmap_pte_clearbits(ptp, PTE_W)
		 * but without spl(), since %fs is not set up properly yet; ie
		 * curcpu() won't work at this point and spl() will break.
		 */
		if (HYPERVISOR_update_va_mapping((vaddr_t)va,
		    *ptp & ~PTE_W, UVMF_INVLPG) < 0) {
			panic("%s page RO update failed.\n", __func__);
		}
	}

	if (HYPERVISOR_set_gdt(frames, gdt_size / sizeof(gdtstore[0])))
		panic("HYPERVISOR_set_gdt failed!\n");
	lgdt_finish();
#endif
}

#ifndef XENPV
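/*
 * Return the index of a free dynamic GDT slot and mark it busy.
 * Called with cpu_lock held; panics if the table is full.
 */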
static int
gdt_get_slot(void)
{
	size_t i;

	KASSERT(mutex_owned(&cpu_lock));

	for (i = 0; i < gdt_bitmap.nslots; i++) {
		if (!gdt_bitmap.busy[i]) {
			gdt_bitmap.busy[i] = true;
			return (int)i;
		}
	}
	panic("gdt_get_slot: out of memory");

	/* NOTREACHED */
	return 0;
}

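/* Release a dynamic GDT slot previously returned by gdt_get_slot(). */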
static void
gdt_put_slot(int slot)
{
	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(slot < gdt_bitmap.nslots);
	gdt_bitmap.busy[slot] = false;
}

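/*
 * Allocate a dynamic GDT slot for a TSS (including its I/O bitmap) and
 * return the corresponding selector.
 */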
int
tss_alloc(const struct i386tss *tss)
{
	int slot;

	mutex_enter(&cpu_lock);
	slot = gdt_get_slot();
	setgdt(slot, tss, sizeof(struct i386tss) + IOMAPSIZE - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	mutex_exit(&cpu_lock);

	return GDYNSEL(slot, SEL_KPL);
}

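/* Free the GDT slot backing a selector returned by tss_alloc(). */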
void
tss_free(int sel)
{

	mutex_enter(&cpu_lock);
	gdt_put_slot(IDXDYNSEL(sel));
	mutex_exit(&cpu_lock);
}

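/*
 * Install an LDT of len bytes at ldtp in a dynamic GDT slot and return
 * the selector, or -1 if the LDT limit (ldt_max) has been reached.
 * Called with cpu_lock held.
 */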
int
ldt_alloc(void *ldtp, size_t len)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));

	if (ldt_count >= ldt_max) {
		return -1;
	}
	ldt_count++;

	slot = gdt_get_slot();
	setgdt(slot, ldtp, len - 1, SDT_SYSLDT, SEL_KPL, 0, 0);

	return GDYNSEL(slot, SEL_KPL);
}

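/* Release the GDT slot of an LDT selector allocated by ldt_alloc(). */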
void
ldt_free(int sel)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(ldt_count > 0);

	slot = IDXDYNSEL(sel);
	gdt_put_slot(slot);
	ldt_count--;
}
#endif