/*	$NetBSD: gdt.c,v 1.50 2009/11/21 03:11:00 rmind Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John T. Kohl, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.50 2009/11/21 03:11:00 rmind Exp $");

#include "opt_multiprocessor.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>

#ifndef XEN
int gdt_size[1];	/* total number of GDT entries */
int gdt_count[1];	/* number of GDT entries in use */
int gdt_next[1];	/* next available slot for sweeping */
int gdt_free[1];	/* next free slot; terminated with GNULL_SEL */
#else
int gdt_size[2];	/* total number of GDT entries */
int gdt_count[2];	/* number of GDT entries in use */
int gdt_next[2];	/* next available slot for sweeping */
int gdt_free[2];	/* next free slot; terminated with GNULL_SEL */
#endif

static int ldt_count;	/* number of LDTs */
static int ldt_max = 1000;	/* max number of LDTs */

void gdt_init(void);
void gdt_grow(int);
int gdt_get_slot1(int);
void gdt_put_slot1(int, int);

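/*
 * Write one descriptor table entry.  Under Xen the descriptor tables are
 * mapped read-only, so the write must be performed by the hypervisor.
 */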
void
update_descriptor(union descriptor *table, union descriptor *entry)
{
#ifndef XEN
	*table = *entry;
#else
	paddr_t pa;
	pt_entry_t *ptp;

	ptp = kvtopte((vaddr_t)table);
	pa = (*ptp & PG_FRAME) | ((vaddr_t)table & ~PG_FRAME);
	if (HYPERVISOR_update_descriptor(pa, entry->raw[0], entry->raw[1]))
		panic("HYPERVISOR_update_descriptor failed\n");
#endif
}

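/*
 * Set the given GDT slot in every CPU's copy of the GDT.
 */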
void
setgdt(int sel, const void *base, size_t limit,
    int type, int dpl, int def32, int gran)
{
	struct segment_descriptor *sd = &gdt[sel].sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

#ifdef XEN
	if (type == SDT_SYS386TSS) {
		/* printk("XXX TSS descriptor not supported in GDT\n"); */
		return;
	}
#endif
	setsegment(sd, base, limit, type, dpl, def32, gran);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_gdt != NULL)
			update_descriptor(&ci->ci_gdt[sel],
			    (union descriptor *)sd);
	}
}

/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	size_t max_len, min_len;
	union descriptor *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	min_len = MINGDTSIZ * sizeof(gdt[0]);

	gdt_size[0] = MINGDTSIZ;
	gdt_count[0] = NGDT;
	gdt_next[0] = NGDT;
	gdt_free[0] = GNULL_SEL;
#ifdef XEN
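	/*
	 * Under Xen, reserve a second region of the same size above the
	 * GDT proper; it holds LDT descriptors (see ldt_alloc() below).
	 */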
	max_len = max_len * 2;
	gdt_size[1] = 0;
	gdt_count[1] = MAXGDTSIZ;
	gdt_next[1] = MAXGDTSIZ;
	gdt_free[1] = GNULL_SEL;
#endif

	old_gdt = gdt;
	gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
	memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
	    SDT_MEMRWA, SEL_KPL, 1, 1);

	gdt_init_cpu(ci);
}

/*
 * Allocate shadow GDT for a slave CPU.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(gdt[0]);
	int min_len = MINGDTSIZ * sizeof(gdt[0]);
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
	    va += PAGE_SIZE) {
		while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
		    == NULL) {
			uvm_wait("gdt_alloc_cpu");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
	memset(ci->ci_gdt, 0, min_len);
	memcpy(ci->ci_gdt, gdt, gdt_count[0] * sizeof(gdt[0]));
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
	    SDT_MEMRWA, SEL_KPL, 1, 1);
}

/*
 * Load the appropriate GDT descriptor; we had better be running on *ci
 * (for the most part, this is how a CPU knows who it is).
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
#ifndef XEN
	struct region_descriptor region;
	size_t max_len;

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	setregion(&region, ci->ci_gdt, max_len - 1);
	lgdt(&region);
#else
	size_t len = gdt_size[0] * sizeof(gdt[0]);
	unsigned long frames[len >> PAGE_SHIFT];
	vaddr_t va;
	pt_entry_t *ptp;
	int f;

	for (va = (vaddr_t)ci->ci_gdt, f = 0;
	     va < (vaddr_t)ci->ci_gdt + len;
	     va += PAGE_SIZE, f++) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		frames[f] = *ptp >> PAGE_SHIFT;
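		/*
		 * Xen requires the frames backing the GDT to be mapped
		 * read-only before they are handed to the hypervisor.
		 */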
		pmap_pte_clearbits(ptp, PG_RW);
	}
	/*
	 * printk("loading gdt %x, %d entries, %d pages",
	 *     frames[0] << PAGE_SHIFT, gdt_size[0], len >> PAGE_SHIFT);
	 */
	if (HYPERVISOR_set_gdt(frames, gdt_size[0]))
		panic("HYPERVISOR_set_gdt failed!\n");
	lgdt_finish();
#endif
}

#ifdef MULTIPROCESSOR

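/*
 * Reload the GDT register from this CPU's private copy of the GDT.
 */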
void
gdt_reload_cpu(struct cpu_info *ci)
{
	struct region_descriptor region;
	size_t max_len;

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	setregion(&region, ci->ci_gdt, max_len - 1);
	lgdt(&region);
}
#endif

/*
 * Grow the GDT.
 */
void
gdt_grow(int which)
{
	size_t old_len, new_len;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size[which] * sizeof(gdt[0]);
	gdt_size[which] <<= 1;
	new_len = old_len << 1;

#ifdef XEN
	if (which != 0) {
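		/*
		 * Region 1 (the Xen LDT area) lives max_len bytes above
		 * the start of the primary CPU's GDT mapping.
		 */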
		size_t max_len = MAXGDTSIZ * sizeof(gdt[0]);
		if (old_len == 0) {
			gdt_size[which] = MINGDTSIZ;
			new_len = gdt_size[which] * sizeof(gdt[0]);
		}
		for (va = (vaddr_t)(cpu_info_primary.ci_gdt) + old_len + max_len;
		    va < (vaddr_t)(cpu_info_primary.ci_gdt) + new_len + max_len;
		    va += PAGE_SIZE) {
			while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
			    == NULL) {
				uvm_wait("gdt_grow");
			}
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		return;
	}
#endif

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (va = (vaddr_t)(ci->ci_gdt) + old_len;
		     va < (vaddr_t)(ci->ci_gdt) + new_len;
		     va += PAGE_SIZE) {
			while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
			    == NULL) {
				uvm_wait("gdt_grow");
			}
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}

	pmap_update(pmap_kernel());
}

/*
 * Allocate a GDT slot as follows:
 * 1) If there are entries on the free list, use those.
 * 2) If there are fewer than gdt_size entries in use, there are free slots
 *    near the end that we can sweep through.
 * 3) As a last resort, we increase the size of the GDT, and sweep through
 *    the new slots.
 */

int
gdt_get_slot(void)
{

	KASSERT(mutex_owned(&cpu_lock));

	return gdt_get_slot1(0);
}

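/*
 * Allocate a slot in region 'which': region 0 is the GDT proper;
 * under Xen, region 1 holds LDT descriptors.
 */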
int
gdt_get_slot1(int which)
{
	int slot;
	size_t offset;

	KASSERT(mutex_owned(&cpu_lock));

	if (gdt_free[which] != GNULL_SEL) {
		slot = gdt_free[which];
		gdt_free[which] = gdt[slot].gd.gd_selector;
	} else {
		offset = which * MAXGDTSIZ * sizeof(gdt[0]);
		if (gdt_next[which] != gdt_count[which] + offset)
			panic("gdt_get_slot botch 1");
		if (gdt_next[which] - offset >= gdt_size[which]) {
			if (gdt_size[which] >= MAXGDTSIZ)
				panic("gdt_get_slot botch 2");
			gdt_grow(which);
		}
		slot = gdt_next[which]++;
	}

	gdt_count[which]++;
	return (slot);
}

/*
 * Deallocate a GDT slot, putting it on the free list.
 */
void
gdt_put_slot(int slot)
{

	KASSERT(mutex_owned(&cpu_lock));

	gdt_put_slot1(slot, 0);
}

void
gdt_put_slot1(int slot, int which)
{
	union descriptor d;
	d.raw[0] = 0;
	d.raw[1] = 0;

	KASSERT(mutex_owned(&cpu_lock));

	gdt_count[which]--;

	d.gd.gd_type = SDT_SYSNULL;
	d.gd.gd_selector = gdt_free[which];
	update_descriptor(&gdt[slot], &d);

	gdt_free[which] = slot;
}

#ifndef XEN
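/*
 * Allocate a GDT slot for the given TSS and return a kernel selector
 * for it.
 */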
int
tss_alloc(const struct i386tss *tss)
{
	int slot;

	mutex_enter(&cpu_lock);
	slot = gdt_get_slot();
	setgdt(slot, tss, sizeof(struct i386tss) + IOMAPSIZE - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	mutex_exit(&cpu_lock);

	return GSEL(slot, SEL_KPL);
}

void
tss_free(int sel)
{

	mutex_enter(&cpu_lock);
	gdt_put_slot(IDXSEL(sel));
	mutex_exit(&cpu_lock);
}
#endif

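/*
 * Allocate a GDT slot describing an LDT and return a selector for it,
 * or -1 if the system-wide LDT limit has been reached.
 */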
int
ldt_alloc(union descriptor *ldtp, size_t len)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));

	if (ldt_count >= ldt_max) {
		return -1;
	}
	ldt_count++;

#ifndef XEN
	slot = gdt_get_slot();
	setgdt(slot, ldtp, len - 1, SDT_SYSLDT, SEL_KPL, 0, 0);
#else
	slot = gdt_get_slot1(1);
	cpu_info_primary.ci_gdt[slot].ld.ld_base = (uint32_t)ldtp;
	cpu_info_primary.ci_gdt[slot].ld.ld_entries =
	    len / sizeof(union descriptor);
#endif

	return GSEL(slot, SEL_KPL);
}

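/*
 * Release the GDT slot backing an LDT selector.
 */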
void
ldt_free(int sel)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(ldt_count > 0);

	slot = IDXSEL(sel);
#ifndef XEN
	gdt_put_slot(slot);
#else
	gdt_put_slot1(slot, 1);
#endif
	ldt_count--;
}
429