/*	$NetBSD: gdt.c,v 1.25 2001/11/18 19:28:34 chs Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John T. Kohl and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.25 2001/11/18 19:28:34 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/user.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>

#define	MINGDTSIZ	512
#define	MAXGDTSIZ	8192
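
/*
 * These sizes are counted in descriptors, not bytes.  MAXGDTSIZ (8192)
 * is the architectural ceiling: the GDTR limit field is 16 bits, so the
 * table can span at most 65536 bytes, i.e. 8192 eight-byte descriptors.
 */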

int gdt_size;		/* total number of GDT entries */
int gdt_count;		/* number of GDT entries in use */
int gdt_next;		/* next available slot for sweeping */
int gdt_free;		/* next free slot; terminated with GNULL_SEL */

struct lock gdt_lock_store;

static __inline void gdt_lock __P((void));
static __inline void gdt_unlock __P((void));
void gdt_compact __P((void));
void gdt_init __P((void));
void gdt_grow __P((void));
void gdt_shrink __P((void));
int gdt_get_slot __P((void));
void gdt_put_slot __P((int));

/*
 * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep
 * waiting for memory.
 *
 * Note that the locking done here is not sufficient for multiprocessor
 * systems.  A freshly allocated slot will still be of type SDT_SYSNULL for
 * some time after the GDT is unlocked, so gdt_compact() could attempt to
 * reclaim it.
 */
static __inline void
gdt_lock()
{

	(void) lockmgr(&gdt_lock_store, LK_EXCLUSIVE, NULL);
}

static __inline void
gdt_unlock()
{

	(void) lockmgr(&gdt_lock_store, LK_RELEASE, NULL);
}

/*
 * Compact the GDT as follows:
 * 0) We partition the GDT into two areas, one of the slots before gdt_count,
 *    and one of the slots after.  After compaction, the former part should be
 *    completely filled, and the latter part should be completely empty.
 * 1) Step through the process list, looking for TSS and LDT descriptors in
 *    the second section, and swap them with empty slots in the first section.
 * 2) Arrange for new allocations to sweep through the empty section.  Since
 *    we're sweeping through all of the empty entries, and we'll create a free
 *    list as things are deallocated, we do not need to create a new free list
 *    here.
 */
void
gdt_compact()
{
	struct proc *p;
	pmap_t pmap;
	int slot = NGDT, oslot;

	proclist_lock_read();
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		pmap = p->p_vmspace->vm_map.pmap;
		oslot = IDXSEL(p->p_md.md_tss_sel);
		if (oslot >= gdt_count) {
			while (gdt[slot].sd.sd_type != SDT_SYSNULL) {
				if (++slot >= gdt_count)
					panic("gdt_compact botch 1");
			}
			gdt[slot] = gdt[oslot];
			gdt[oslot].gd.gd_type = SDT_SYSNULL;
			p->p_md.md_tss_sel = GSEL(slot, SEL_KPL);
		}
		simple_lock(&pmap->pm_lock);
		oslot = IDXSEL(pmap->pm_ldt_sel);
		if (oslot >= gdt_count) {
			while (gdt[slot].sd.sd_type != SDT_SYSNULL) {
				if (++slot >= gdt_count)
					panic("gdt_compact botch 2");
			}
			gdt[slot] = gdt[oslot];
			gdt[oslot].gd.gd_type = SDT_SYSNULL;
			pmap->pm_ldt_sel = GSEL(slot, SEL_KPL);
			/*
			 * XXXSMP: if the pmap is in use on other
			 * processors, they need to reload their
			 * LDT!
			 */
		}
		simple_unlock(&pmap->pm_lock);
	}
	for (; slot < gdt_count; slot++)
		if (gdt[slot].gd.gd_type == SDT_SYSNULL)
			panic("gdt_compact botch 3");
	for (slot = gdt_count; slot < gdt_size; slot++)
		if (gdt[slot].gd.gd_type != SDT_SYSNULL)
			panic("gdt_compact botch 4");
	gdt_next = gdt_count;
	gdt_free = GNULL_SEL;
	proclist_unlock_read();
}
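
/*
 * Layout invariant that gdt_compact() establishes (ranges are slot
 * indices, illustrative only):
 *
 *	[0 .. NGDT)		fixed boot-time descriptors, never moved
 *	[NGDT .. gdt_count)	densely packed TSS and LDT descriptors
 *	[gdt_count .. gdt_size)	all SDT_SYSNULL, swept by gdt_next
 */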

/*
 * Initialize the GDT.
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct region_descriptor region;
	union descriptor *old_gdt;
	struct vm_page *pg;
	vaddr_t va;

	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(gdt[0]);
	min_len = MINGDTSIZ * sizeof(gdt[0]);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	old_gdt = gdt;
	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
	setregion(&region, gdt, max_len - 1);
	lgdt(&region);
}
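
/*
 * Note that gdt_init() reserves virtual space for the maximum-sized GDT
 * up front but backs only MINGDTSIZ entries with physical pages.  The
 * GDTR limit is set to max_len - 1 once, so gdt_grow() only needs to
 * map additional pages; no lgdt() reload is required when the table
 * grows.
 */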

/*
 * Grow or shrink the GDT.
 */
void
gdt_grow()
{
	size_t old_len, new_len;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size * sizeof(gdt[0]);
	gdt_size <<= 1;
	new_len = old_len << 1;

	for (va = (vaddr_t)gdt + old_len; va < (vaddr_t)gdt + new_len;
	    va += PAGE_SIZE) {
		while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
		       NULL) {
			uvm_wait("gdt_grow");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
}
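
/*
 * The new pages are allocated with UVM_PGA_ZERO, and an all-zero
 * descriptor has type SDT_SYSNULL, so every slot in the newly mapped
 * half reads as empty, which is exactly what gdt_compact()'s sanity
 * checks expect of slots beyond gdt_count.
 */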

void
gdt_shrink()
{
	size_t old_len, new_len;
	struct vm_page *pg;
	paddr_t pa;
	vaddr_t va;

	old_len = gdt_size * sizeof(gdt[0]);
	gdt_size >>= 1;
	new_len = old_len >> 1;

	for (va = (vaddr_t)gdt + new_len; va < (vaddr_t)gdt + old_len;
	    va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa)) {
			panic("gdt_shrink botch");
		}
		pg = PHYS_TO_VM_PAGE(pa);
		pmap_kremove(va, PAGE_SIZE);
		uvm_pagefree(pg);
	}
}
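
/*
 * gdt_shrink() unmaps and frees the upper half of the table, so the
 * caller must first run gdt_compact() to guarantee that no live TSS or
 * LDT descriptor remains in that range; gdt_put_slot() below does
 * exactly this.
 */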

/*
 * Allocate a GDT slot as follows:
 * 1) If there are entries on the free list, use those.
 * 2) If there are fewer than gdt_size entries in use, there are free slots
 *    near the end that we can sweep through.
 * 3) As a last resort, we increase the size of the GDT, and sweep through
 *    the new slots.
 */
int
gdt_get_slot()
{
	int slot;

	gdt_lock();

	if (gdt_free != GNULL_SEL) {
		slot = gdt_free;
		gdt_free = gdt[slot].gd.gd_selector;
	} else {
		if (gdt_next != gdt_count)
			panic("gdt_get_slot botch 1");
		if (gdt_next >= gdt_size) {
			if (gdt_size >= MAXGDTSIZ)
				panic("gdt_get_slot botch 2");
			gdt_grow();
		}
		slot = gdt_next++;
	}

	gdt_count++;
	gdt_unlock();
	return (slot);
}
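
/*
 * The free list is threaded through the descriptors themselves: a free
 * slot stores the index of the next free slot in its gd_selector field,
 * and GNULL_SEL terminates the chain, so no separate bookkeeping
 * storage is needed.
 */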

/*
 * Deallocate a GDT slot, putting it on the free list.
 */
void
gdt_put_slot(slot)
	int slot;
{

	gdt_lock();
	gdt_count--;

	gdt[slot].gd.gd_type = SDT_SYSNULL;
	/*
	 * Shrink the GDT if we're using at most 1/4 of it.  After
	 * compacting and halving, the table is still at most half
	 * full, so we retain room for almost twice as many processes
	 * as are now running before having to grow the GDT again.
	 */
	if (gdt_size > MINGDTSIZ && gdt_count <= gdt_size / 4) {
		gdt_compact();
		gdt_shrink();
	} else {
		gdt[slot].gd.gd_selector = gdt_free;
		gdt_free = slot;
	}

	gdt_unlock();
}
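
/*
 * Worked example of the shrink policy: with gdt_size == 2048, a shrink
 * triggers once gdt_count drops to 512.  gdt_compact() packs those 512
 * descriptors into the low slots, gdt_shrink() halves the table to
 * 1024 entries, and at most half of the smaller table is in use.
 */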

void
tss_alloc(p)
	struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	int slot;

	slot = gdt_get_slot();
	setsegment(&gdt[slot].sd, &pcb->pcb_tss, sizeof(struct pcb) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	p->p_md.md_tss_sel = GSEL(slot, SEL_KPL);
}
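
/*
 * Each process thus gets a private 32-bit TSS descriptor.  md_tss_sel
 * is the selector presumably loaded into the task register (ltr) when
 * the process is switched to; the exact call site is machine-dependent
 * context-switch code and is not shown here.
 */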

void
tss_free(p)
	struct proc *p;
{

	gdt_put_slot(IDXSEL(p->p_md.md_tss_sel));
}

void
ldt_alloc(pmap, ldt, len)
	struct pmap *pmap;
	union descriptor *ldt;
	size_t len;
{
	int slot;

	slot = gdt_get_slot();
	setsegment(&gdt[slot].sd, ldt, len - 1, SDT_SYSLDT, SEL_KPL, 0, 0);
	simple_lock(&pmap->pm_lock);
	pmap->pm_ldt_sel = GSEL(slot, SEL_KPL);
	simple_unlock(&pmap->pm_lock);
}
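
/*
 * As with the XXXSMP note in gdt_compact(), updating pm_ldt_sel only
 * changes the pmap; any CPU currently running with this pmap keeps the
 * old selector in its LDTR until it reloads it with lldt, typically on
 * the next context switch or pmap activation.
 */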

void
ldt_free(pmap)
	struct pmap *pmap;
{
	int slot;

	simple_lock(&pmap->pm_lock);
	slot = IDXSEL(pmap->pm_ldt_sel);
	simple_unlock(&pmap->pm_lock);

	gdt_put_slot(slot);
}