/*	$NetBSD: gdt.c,v 1.27 2002/10/08 20:16:09 fvdl Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John T. Kohl and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.27 2002/10/08 20:16:09 fvdl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/user.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>

int gdt_size;		/* total number of GDT entries */
int gdt_count;		/* number of GDT entries in use */
int gdt_next;		/* next available slot for sweeping */
int gdt_free;		/* next free slot; terminated with GNULL_SEL */

struct lock gdt_lock_store;

static __inline void gdt_lock __P((void));
static __inline void gdt_unlock __P((void));
#if 0
void gdt_compact __P((void));
#endif
void gdt_init __P((void));
void gdt_grow __P((void));
#if 0
void gdt_shrink __P((void));
#endif
int gdt_get_slot __P((void));
void gdt_put_slot __P((int));

/*
 * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep
 * waiting for memory.
 *
 * Note that the locking done here is not sufficient for multiprocessor
 * systems.  A freshly allocated slot will still be of type SDT_SYSNULL for
 * some time after the GDT is unlocked, so gdt_compact() could attempt to
 * reclaim it.
 */
static __inline void
gdt_lock()
{

	(void) lockmgr(&gdt_lock_store, LK_EXCLUSIVE, NULL);
}

static __inline void
gdt_unlock()
{

	(void) lockmgr(&gdt_lock_store, LK_RELEASE, NULL);
}

void
setgdt(int sel, void *base, size_t limit,
    int type, int dpl, int def32, int gran)
{
	struct segment_descriptor *sd = &gdt[sel].sd;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/* Update the master GDT, then propagate to each CPU's copy. */
	setsegment(sd, base, limit, type, dpl, def32, gran);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_gdt != NULL)
			ci->ci_gdt[sel].sd = *sd;
	}
}
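
/*
 * Usage sketch (illustrative; mirrors tss_alloc() below): install a TSS
 * descriptor into a freshly allocated slot and turn it into a selector.
 *
 *	slot = gdt_get_slot();
 *	setgdt(slot, &pcb->pcb_tss, sizeof(struct pcb) - 1,
 *	    SDT_SYS386TSS, SEL_KPL, 0, 0);
 *	sel = GSEL(slot, SEL_KPL);
 */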

/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	union descriptor *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	min_len = MINGDTSIZ * sizeof(gdt[0]);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	/*
	 * Reserve VA for the largest possible table, but back only the
	 * minimum size with physical pages; gdt_grow() maps the rest.
	 */
	old_gdt = gdt;
	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	/* Carry over the bootstrap descriptors, then switch to the new table. */
	memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 1, 1);

	gdt_init_cpu(ci);
}
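
/*
 * Sizing note: each union descriptor is 8 bytes and the i386 caps the
 * GDT at 65536 bytes (the limit field is 16 bits), i.e. 8192 entries.
 * Assuming, for example, MAXGDTSIZ == 8192 and MINGDTSIZ == 512 (see
 * machine/gdt.h for the real values), gdt_init() above reserves 64KB of
 * kernel VA but backs only the first 4KB (one page) with memory.
 */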

/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(gdt[0]);
	int min_len = MINGDTSIZ * sizeof(gdt[0]);

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + min_len, FALSE, FALSE);
	memset(ci->ci_gdt, 0, min_len);
	/* Clone the master GDT, then point GCPU_SEL at this CPU's cpu_info. */
	memcpy(ci->ci_gdt, gdt, gdt_count * sizeof(gdt[0]));
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 1, 1);
}
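
/*
 * The shadow GDT starts as a copy of the master table, with GCPU_SEL
 * rewritten to describe this CPU's own cpu_info.  Loading that segment
 * into a segment register (the port presumably uses %fs for curcpu())
 * is what lets a processor find itself, per the comment below.
 */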

/*
 * Load the appropriate GDT descriptor; we had better be running on *ci
 * (for the most part, this is how a CPU knows who it is).
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
	struct region_descriptor region;
	size_t max_len;

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	setregion(&region, ci->ci_gdt, max_len - 1);
	lgdt(&region);
}
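
/*
 * setregion() assembles the 6-byte pseudo-descriptor that lgdt expects:
 * a 16-bit limit followed by a 32-bit linear base.  The limit field is
 * inclusive, hence "max_len - 1".  A sketch of the layout (the real
 * definition lives in machine/segments.h):
 *
 *	struct region_descriptor {
 *		unsigned rd_limit:16;	table size in bytes, minus one
 *		unsigned rd_base:32;	linear base address of the table
 *	} __packed;
 */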

#ifdef MULTIPROCESSOR

void
gdt_reload_cpu(struct cpu_info *ci)
{
	struct region_descriptor region;
	size_t max_len;

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	setregion(&region, ci->ci_gdt, max_len - 1);
	lgdt(&region);
}
#endif

/*
 * Grow or shrink the GDT.
 */
void
gdt_grow()
{
	size_t old_len, new_len;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size * sizeof(gdt[0]);
	gdt_size <<= 1;
	new_len = old_len << 1;

	/* Map pages for the newly exposed half of every CPU's table. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		for (va = (vaddr_t)(ci->ci_gdt) + old_len;
		     va < (vaddr_t)(ci->ci_gdt) + new_len;
		     va += PAGE_SIZE) {
			while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
			    == NULL) {
				uvm_wait("gdt_grow");
			}
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
}
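
/*
 * Worked example: gdt_grow() always doubles, so a 512-entry table
 * (512 * 8 == 4096 bytes, one page) becomes a 1024-entry table by
 * mapping one more page per CPU; only the new half [old_len, new_len)
 * needs backing.  Allocation failures are not fatal here: the loop
 * sleeps in uvm_wait() until a page becomes available.
 */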

#if 0
void
gdt_shrink()
{
	size_t old_len, new_len;
	struct vm_page *pg;
	paddr_t pa;
	vaddr_t va;

	old_len = gdt_size * sizeof(gdt[0]);
	gdt_size >>= 1;
	new_len = old_len >> 1;

	/* Unmap and free the pages backing the discarded upper half. */
	for (va = (vaddr_t)gdt + new_len; va < (vaddr_t)gdt + old_len;
	    va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa)) {
			panic("gdt_shrink botch");
		}
		pg = PHYS_TO_VM_PAGE(pa);
		pmap_kremove(va, PAGE_SIZE);
		uvm_pagefree(pg);
	}
}
#endif

/*
 * Allocate a GDT slot as follows:
 * 1) If there are entries on the free list, use those.
 * 2) If there are fewer than gdt_size entries in use, there are free slots
 *    near the end that we can sweep through.
 * 3) As a last resort, we increase the size of the GDT, and sweep through
 *    the new slots.
 */
int
gdt_get_slot()
{
	int slot;

	gdt_lock();

	if (gdt_free != GNULL_SEL) {
		/* Pop the head of the free list. */
		slot = gdt_free;
		gdt_free = gdt[slot].gd.gd_selector;
	} else {
		/*
		 * With an empty free list, every slot below gdt_next
		 * must be in use.
		 */
		if (gdt_next != gdt_count)
			panic("gdt_get_slot botch 1");
		if (gdt_next >= gdt_size) {
			if (gdt_size >= MAXGDTSIZ)
				panic("gdt_get_slot botch 2");
			gdt_grow();
		}
		slot = gdt_next++;
	}

	gdt_count++;
	gdt_unlock();
	return (slot);
}

/*
 * Deallocate a GDT slot, putting it on the free list.
 */
void
gdt_put_slot(int slot)
{

	gdt_lock();
	gdt_count--;

	/* Disable the descriptor and push the slot onto the free list. */
	gdt[slot].gd.gd_type = SDT_SYSNULL;
#if 0
	/*
	 * Shrink the GDT if we're using less than 1/4 of it.
	 * Shrinking at that point means we'll still have room for
	 * almost 2x as many processes as are now running without
	 * having to grow the GDT.
	 */
	if (gdt_size > MINGDTSIZ && gdt_count <= gdt_size / 4) {
		gdt_compact();
		gdt_shrink();
	} else {
#endif
		gdt[slot].gd.gd_selector = gdt_free;
		gdt_free = slot;
#if 0
	}
#endif

	gdt_unlock();
}
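
/*
 * The free list lives inside the GDT itself: a freed slot's type is set
 * to SDT_SYSNULL and its gd_selector field is reused as the "next free"
 * link, with GNULL_SEL (the never-allocated null descriptor) as the
 * list terminator.  gdt_get_slot() pops from this list before sweeping
 * fresh slots at the end of the table.
 */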

int
tss_alloc(struct pcb *pcb)
{
	int slot;

	slot = gdt_get_slot();
	setgdt(slot, &pcb->pcb_tss, sizeof(struct pcb) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	return GSEL(slot, SEL_KPL);
}
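
/*
 * Selector arithmetic (standard i386): GSEL(slot, SEL_KPL) forms
 * (slot << 3) | SEL_KPL, the low three bits of a selector being the
 * table indicator and RPL; IDXSEL() inverts this to recover the slot.
 * For example, slot 10 at kernel privilege yields selector 0x50.
 */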

void
tss_free(int sel)
{

	gdt_put_slot(IDXSEL(sel));
}

/*
 * Caller must have pmap locked for both of these functions.
 */
void
ldt_alloc(struct pmap *pmap, union descriptor *ldt, size_t len)
{
	int slot;

	slot = gdt_get_slot();
	setgdt(slot, ldt, len - 1, SDT_SYSLDT, SEL_KPL, 0, 0);
	pmap->pm_ldt_sel = GSEL(slot, SEL_KPL);
}

void
ldt_free(struct pmap *pmap)
{
	int slot;

	slot = IDXSEL(pmap->pm_ldt_sel);

	gdt_put_slot(slot);
}
359