/*
 * Copyright (c) 1994 William F. Jolitz.
 * 386BSD Copyright Restrictions Apply. All Other Rights Reserved.
 *
 * $Id: segments.h,v 1.1 94/10/19 17:40:05 bill Exp $
 *
 * 386 Segmentation Data Structures and definitions.
 */

/*
 * Selectors
 */
typedef	u_short sel_t;
#include "sel.h"

#define	ISLDT(s)	((s) & SEL_LDT)		/* is it local or global */
#define	SEL_LDT	4				/* local descriptor table */
#define	IDXSEL(s)	(((s)>>3) & 0x1fff)	/* index of selector */
#define	LSEL(s, r)	(((s)<<3) | SEL_LDT | r) /* a local selector */
#define	GSEL(s, r)	(((s)<<3) | r)		/* a global selector */

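/*
 * Example (illustrative, not part of the original header): building and
 * taking apart a selector with the macros above.  SEL_KPL is assumed to
 * be the ring 0 privilege value from "sel.h".
 */
#if 0
static void
selector_example(void)
{
	sel_t cs = GSEL(1, SEL_KPL);	/* GDT index 1, DPL 0 -> 0x08 */
	int idx = IDXSEL(cs);		/* recovers the index, 1 */
	int inldt = ISLDT(cs);		/* 0: the selector refers to the GDT */
}
#endif
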
/*
 * Memory and System segment descriptors
 */
struct	segment_descriptor	{
	unsigned __PACK(sd_lolimit:16);	/* segment extent (lsb) */
	unsigned __PACK(sd_lobase:24);	/* segment base address (lsb) */
	unsigned __PACK(sd_type:5);	/* segment type */
	unsigned __PACK(sd_dpl:2);	/* segment descriptor privilege level */
	unsigned __PACK(sd_p:1);	/* segment descriptor present */
	unsigned __PACK(sd_hilimit:4);	/* segment extent (msb) */
	unsigned __PACK(sd_xx:2);	/* unused */
	unsigned __PACK(sd_def32:1);	/* default 32 vs 16 bit size */
	unsigned __PACK(sd_gran:1);	/* limit granularity (byte/page units) */
	unsigned __PACK(sd_hibase:8);	/* segment base address (msb) */
};

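/*
 * Example (illustrative sketch): the 32-bit base and 20-bit limit are
 * split across the descriptor to match the hardware layout; they can be
 * reassembled from the pieces like this.
 */
#if 0
static unsigned
sd_base(struct segment_descriptor *sdp)
{
	return (sdp->sd_lobase | (sdp->sd_hibase << 24));
}

static unsigned
sd_limit(struct segment_descriptor *sdp)
{
	/* in bytes or pages, depending on sd_gran */
	return (sdp->sd_lolimit | (sdp->sd_hilimit << 16));
}
#endif
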
/*
 * Gate descriptors (i.e., indirect descriptors)
 */
struct	gate_descriptor	{
	unsigned __PACK(gd_looffset:16); /* gate offset (lsb) */
	unsigned __PACK(gd_selector:16); /* gate segment selector */
	unsigned __PACK(gd_stkcpy:5);	/* number of stack words to copy */
	unsigned __PACK(gd_xx:3);	/* unused */
	unsigned __PACK(gd_type:5);	/* segment type */
	unsigned __PACK(gd_dpl:2);	/* segment descriptor privilege level */
	unsigned __PACK(gd_p:1);	/* segment descriptor present */
	unsigned __PACK(gd_hioffset:16); /* gate offset (msb) */
};

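/*
 * Example (illustrative sketch): as with segment bases, the 32-bit gate
 * target offset is split in two and can be reassembled from the pieces.
 */
#if 0
static unsigned
gd_offset(struct gate_descriptor *gdp)
{
	return (gdp->gd_looffset | (gdp->gd_hioffset << 16));
}
#endif
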
/*
 * Generic descriptor
 */
union	descriptor	{
	struct	segment_descriptor sd;
	struct	gate_descriptor gd;
};

	/* system segments and gate types */
#define	SDT_SYSNULL	 0	/* system null */
#define	SDT_SYS286TSS	 1	/* system 286 TSS available */
#define	SDT_SYSLDT	 2	/* system local descriptor table */
#define	SDT_SYS286BSY	 3	/* system 286 TSS busy */
#define	SDT_SYS286CGT	 4	/* system 286 call gate */
#define	SDT_SYSTASKGT	 5	/* system task gate */
#define	SDT_SYS286IGT	 6	/* system 286 interrupt gate */
#define	SDT_SYS286TGT	 7	/* system 286 trap gate */
#define	SDT_SYSNULL2	 8	/* system null again */
#define	SDT_SYS386TSS	 9	/* system 386 TSS available */
#define	SDT_SYSNULL3	10	/* system null again */
#define	SDT_SYS386BSY	11	/* system 386 TSS busy */
#define	SDT_SYS386CGT	12	/* system 386 call gate */
#define	SDT_SYSNULL4	13	/* system null again */
#define	SDT_SYS386IGT	14	/* system 386 interrupt gate */
#define	SDT_SYS386TGT	15	/* system 386 trap gate */

	/* memory segment types */
#define	SDT_MEMRO	16	/* memory read only */
#define	SDT_MEMROA	17	/* memory read only accessed */
#define	SDT_MEMRW	18	/* memory read write */
#define	SDT_MEMRWA	19	/* memory read write accessed */
#define	SDT_MEMROD	20	/* memory read only expand down limit */
#define	SDT_MEMRODA	21	/* memory read only expand down limit accessed */
#define	SDT_MEMRWD	22	/* memory read write expand down limit */
#define	SDT_MEMRWDA	23	/* memory read write expand down limit accessed */
#define	SDT_MEME	24	/* memory execute only */
#define	SDT_MEMEA	25	/* memory execute only accessed */
#define	SDT_MEMER	26	/* memory execute read */
#define	SDT_MEMERA	27	/* memory execute read accessed */
#define	SDT_MEMEC	28	/* memory execute only conforming */
#define	SDT_MEMEAC	29	/* memory execute only accessed conforming */
#define	SDT_MEMERC	30	/* memory execute read conforming */
#define	SDT_MEMERAC	31	/* memory execute read accessed conforming */

/* is memory segment descriptor pointer ? */
#define ISMEMSDP(s)	((s)->sd_type >= SDT_MEMRO && (s)->sd_type <= SDT_MEMERAC)

/* is 286 gate descriptor pointer ? */
#define IS286GDP(s)	(((s)->sd_type >= SDT_SYS286CGT \
				 && (s)->sd_type < SDT_SYS286TGT))

/* is 386 gate descriptor pointer ? */
#define IS386GDP(s)	(((s)->sd_type >= SDT_SYS386CGT \
				&& (s)->sd_type < SDT_SYS386TGT))

/* is gate descriptor pointer ? */
#define ISGDP(s)	(IS286GDP(s) || IS386GDP(s))

/* is segment descriptor pointer ? */
#define ISSDP(s)	(ISMEMSDP(s) || !ISGDP(s))

/* is system segment descriptor pointer ? */
#define ISSYSSDP(s)	(!ISMEMSDP(s) && !ISGDP(s))

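/*
 * Example (illustrative, in the spirit of the kernel's IDT setup code,
 * not a definition from this header): filling in a 386 interrupt gate
 * for a handler.  "handler" is a placeholder; KCSEL is the kernel code
 * selector also used by lgdt() below, and SEL_KPL comes from "sel.h".
 */
#if 0
static void
setgate_example(struct gate_descriptor *gdp, void (*handler)(void))
{
	gdp->gd_looffset = (unsigned)handler;		/* offset bits 0..15 */
	gdp->gd_hioffset = (unsigned)handler >> 16;	/* offset bits 16..31 */
	gdp->gd_selector = KCSEL;			/* kernel code segment */
	gdp->gd_stkcpy = 0;				/* no stack words copied */
	gdp->gd_xx = 0;
	gdp->gd_type = SDT_SYS386IGT;			/* 386 interrupt gate */
	gdp->gd_dpl = SEL_KPL;				/* only the kernel may invoke it */
	gdp->gd_p = 1;					/* present */
}
#endif
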
/*
 * Software definitions are in this convenient format, which is
 * translated into the inconvenient hardware segment descriptor format
 * when needed by the 386.
 */

struct	soft_segment_descriptor	{
	unsigned ssd_base;		/* segment base address */
	unsigned ssd_limit;		/* segment extent */
	unsigned ssd_type:5;		/* segment type */
	unsigned ssd_dpl:2;		/* segment descriptor privilege level */
	unsigned ssd_p:1;		/* segment descriptor present */
	unsigned ssd_xx:4;		/* unused */
	unsigned ssd_xx1:2;		/* unused */
	unsigned ssd_def32:1;		/* default 32 vs 16 bit size */
	unsigned ssd_gran:1;		/* limit granularity (byte/page units) */
};

void ssdtosd(struct soft_segment_descriptor *, union descriptor *);

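/*
 * Example (illustrative sketch): describing a flat, 32-bit, page-granular
 * kernel code segment in the soft format and converting it into a
 * hardware descriptor with ssdtosd().  SEL_KPL is assumed from "sel.h".
 */
#if 0
static void
ssd_example(union descriptor *dp)
{
	struct soft_segment_descriptor ssd;

	ssd.ssd_base = 0;		/* base of the address space */
	ssd.ssd_limit = 0xfffff;	/* 2^20 pages, i.e. 4GB */
	ssd.ssd_type = SDT_MEMERA;	/* execute/read, accessed */
	ssd.ssd_dpl = SEL_KPL;		/* kernel privilege */
	ssd.ssd_p = 1;			/* present */
	ssd.ssd_xx = 0;
	ssd.ssd_xx1 = 0;
	ssd.ssd_def32 = 1;		/* 32-bit default operand size */
	ssd.ssd_gran = 1;		/* limit counted in pages */
	ssdtosd(&ssd, dp);
}
#endif
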
/*
 * Region descriptors, used to load the GDT/IDT tables before any
 * segments exist.
 */
struct region_descriptor {
	unsigned __PACK(rd_limit:16);	/* segment extent */
	unsigned __PACK(rd_base:32);	/* base address */
};

/* load global descriptor table */
#define	lgdt(s) ({ \
	struct region_descriptor rd = s; \
	asm volatile (" \
	lgdt %0 ; \
	/* flush prefetch queue */ \
	jmp 1f ; nop ; 1:  \
	/* reload stale selectors */ \
	movw %w1, %%ds ; movw %w1, %%es ; movw %w1, %%ss ; \
	/* reload code selector by using an intersegment return */ \
	pushl %2 ; pushl $1f ; lret ; 1: " \
	 : : "m"(rd), "r"(KDSEL), "r"(KCSEL)); \
})

/* load interrupt descriptor table */
#define	lidt(s) ({ \
	struct region_descriptor rd = s; \
	asm volatile ("lidt %0 " : : "m"(rd)); \
})

/* load local descriptor table */
#define	lldt(s) ({ \
	sel_t sel = s; \
	asm volatile ("lldt %w0 " : : "r"(sel)); \
})

/* load current task state */
#define	ltr(s) ({ \
	sel_t sel = s; \
	asm volatile ("ltr %w0 " : : "r"(sel)); \
})

/*
 * Segment Protection Exception code bits
 */

#define	SEGEX_EXT	0x01	/* recursive or externally induced */
#define	SEGEX_IDT	0x02	/* interrupt descriptor table */
#define	SEGEX_TI	0x04	/* local descriptor table */
				/* other bits are affected descriptor index */
#define SEGEX_IDX(s)	(((s)>>3)&0x1fff)

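/*
 * Example (illustrative sketch): pulling apart a protection fault error
 * code into the table it refers to and the offending descriptor index.
 */
#if 0
static int
segex_example(unsigned code)
{
	int in_idt = (code & SEGEX_IDT) != 0;	/* index refers to the IDT */
	int in_ldt = (code & SEGEX_TI) != 0;	/* otherwise LDT vs. GDT */
	int external = (code & SEGEX_EXT) != 0;	/* externally induced */

	return (SEGEX_IDX(code));		/* offending descriptor index */
}
#endif
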
/*
 * Size of IDT table
 */

#define	NIDT	256
#define	NRSVIDT	32		/* reserved entries for cpu exceptions */

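/*
 * Example (illustrative sketch): loading the IDT from a table of NIDT
 * gate descriptors with the lidt() macro above.  "idt" stands in for the
 * kernel's gate descriptor array.
 */
#if 0
static void
load_idt_example(struct gate_descriptor *idt)
{
	struct region_descriptor rd;

	rd.rd_limit = NIDT * sizeof(struct gate_descriptor) - 1;
	rd.rd_base = (unsigned)idt;
	lidt(rd);
}
#endif
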
/* special selectors in the kernel */
extern sel_t _udatasel, _ucodesel, _exit_tss_sel;

/* global descriptor table */
extern union descriptor *gdt, gdt_bootstrap[];

/* global descriptor table allocation pointers/counters */
extern struct segment_descriptor *sdfirstp_gdesc, *sdlast_gdesc,
    *sdlastfree_gdesc;
extern int sdngd_gdesc, sdnfree_gdesc;

void expanddesctable(void);

/*
 * Allocate a global descriptor to the kernel. If there are no free
 * descriptors, expand the table.
 */
extern inline struct segment_descriptor *
allocdesc(void)
{
	struct segment_descriptor *sdp;
	int fullsearch = 1;

tryagain:
	/* out of global descriptors? then, make more */
	if (sdnfree_gdesc == 0)
		expanddesctable();

	/* find a free descriptor, starting with the last freed descriptor */
	for (sdp = sdlastfree_gdesc; sdp <= sdlast_gdesc && sdp->sd_p; sdp++)
		;

	/* if we did not find one, start at the beginning of the table again */
	if (sdp > sdlast_gdesc && fullsearch) {
		sdlastfree_gdesc = sdfirstp_gdesc;
		fullsearch = 0;
		goto tryagain;
	}

#ifdef DIAGNOSTIC
	if (sdp->sd_p)
		panic("allocdesc: no free descriptor");
#endif

	/* next place to try? */
	if (sdp < sdlast_gdesc)
		sdlastfree_gdesc = sdp + 1;
	sdnfree_gdesc--;

	/* fill in the blanks */
	/* memset(sdp, 0, sizeof(*sdp));	XXX overkill */
	*(int *) sdp = 0;		/* clear low 32 bits of descriptor */
	*(((int *) sdp) + 1) = 0;	/* clear high 32 bits */
	sdp->sd_p = 1;
	return (sdp);
}

/*
 * Return a global descriptor to free status, so it may be reused.
 */
extern inline void
freedesc(struct segment_descriptor *sdp)
{
	sdp->sd_p = 0;		/* will generate an invalid tss if used */
	sdnfree_gdesc++;
	if (sdlastfree_gdesc > sdp)
		sdlastfree_gdesc = sdp;
	/* XXX reduce the table size if sdnfree_gdesc grows to more than half
	   of the total table size, and if we can compact the table. When we
	   have a surplus of descriptors, restrict allocation to the first
	   half, and keep separate counts of still-allocated descriptors in
	   the upper/lower halves. We can shrink the table by half when the
	   outstanding allocated descriptors in the top half drop to zero --
	   too hard for now. */
}

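/*
 * Example (illustrative sketch): the general lifecycle of a dynamically
 * allocated global descriptor -- allocate a slot, fill it in, hand out a
 * selector built with GSEL(), and later release it with freedesc().  The
 * alloctss()/freetss() pair below is the real in-kernel instance of this
 * pattern; the data-segment values here are only for illustration.
 */
#if 0
static sel_t
alloc_data_desc_example(unsigned base, unsigned limit)
{
	struct segment_descriptor *sdp = allocdesc();

	sdp->sd_lobase = base;			/* base bits 0..23 */
	sdp->sd_hibase = base >> 24;		/* base bits 24..31 */
	sdp->sd_lolimit = limit;		/* limit bits 0..15 */
	sdp->sd_hilimit = limit >> 16;		/* limit bits 16..19 */
	sdp->sd_type = SDT_MEMRWA;		/* read/write data */
	sdp->sd_dpl = SEL_KPL;
	sdp->sd_def32 = 1;
	sdp->sd_gran = 0;			/* byte-granular limit */
	return (GSEL(sdp - &gdt[0].sd, SEL_KPL));
}
#endif
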
#ifdef _PROC_H_
/*
 * Allocate a TSS descriptor to a kernel thread, in the course of
 * creating a new thread. Special version of allocdesc().
 */
extern inline void
alloctss(struct proc *p) {
	struct segment_descriptor *sdp = allocdesc();
	sdp->sd_lolimit = sizeof(struct i386tss) - 1;
	sdp->sd_lobase = (int)p->p_addr;
	sdp->sd_hibase = ((int)p->p_addr) >> 24;
	sdp->sd_type = SDT_SYS386TSS;

	/* construct selector for new tss */
	p->p_md.md_tsel = GSEL((sdp - &gdt[0].sd), SEL_KPL);
}
#endif

/*
 * Return to the free pool the TSS descriptor of a thread being
 * deallocated. Special case of freedesc().
 */
extern inline void
freetss(sel_t tss_sel) {

	/*
	 * if running on this thread, change to an interim
	 * tss until we swtch()
	 */
	/* if (tss_sel == str()) */
		ltr(_exit_tss_sel); /* busy until final ljmp */

	freedesc(&gdt[IDXSEL(tss_sel)].sd);
}
303