1 /*
2  * librm: a library for interfacing to real-mode code
3  *
4  * Michael Brown <mbrown@fensystems.co.uk>
5  *
6  */
7 
8 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
9 
10 #include <stdint.h>
11 #include <strings.h>
12 #include <assert.h>
13 #include <ipxe/profile.h>
14 #include <realmode.h>
15 #include <pic8259.h>
16 
17 /*
18  * This file provides functions for managing librm.
19  *
20  */
21 
/** The interrupt wrapper (defined in assembly) */
extern char interrupt_wrapper[];

/** The interrupt vectors (one relocatable code stub per interrupt) */
static struct interrupt_vector intr_vec[NUM_INT];

/** The 32-bit interrupt descriptor table */
static struct interrupt32_descriptor
idt32[NUM_INT] __attribute__ (( aligned ( 16 ) ));

/** The 32-bit interrupt descriptor table register
 *
 * The base address is filled in by init_idt().
 */
struct idtr32 idtr32 = {
	.limit = ( sizeof ( idt32 ) - 1 ),
};

/** The 64-bit interrupt descriptor table */
static struct interrupt64_descriptor
idt64[NUM_INT] __attribute__ (( aligned ( 16 ) ));

/** The 64-bit interrupt descriptor table register
 *
 * The base address is filled in by init_idt() (on 64-bit builds only).
 */
struct idtr64 idtr64 = {
	.limit = ( sizeof ( idt64 ) - 1 ),
};

/** Timer interrupt profiler */
static struct profiler timer_irq_profiler __profiler = { .name = "irq.timer" };

/** Other interrupt profiler */
static struct profiler other_irq_profiler __profiler = { .name = "irq.other" };
51 
52 /**
53  * Allocate space on the real-mode stack and copy data there from a
54  * user buffer
55  *
56  * @v data		User buffer
57  * @v size		Size of stack data
58  * @ret sp		New value of real-mode stack pointer
59  */
copy_user_to_rm_stack(userptr_t data,size_t size)60 uint16_t copy_user_to_rm_stack ( userptr_t data, size_t size ) {
61 	userptr_t rm_stack;
62 	rm_sp -= size;
63 	rm_stack = real_to_user ( rm_ss, rm_sp );
64 	memcpy_user ( rm_stack, 0, data, 0, size );
65 	return rm_sp;
66 };
67 
68 /**
69  * Deallocate space on the real-mode stack, optionally copying back
70  * data to a user buffer.
71  *
72  * @v data		User buffer
73  * @v size		Size of stack data
74  */
remove_user_from_rm_stack(userptr_t data,size_t size)75 void remove_user_from_rm_stack ( userptr_t data, size_t size ) {
76 	if ( data ) {
77 		userptr_t rm_stack = real_to_user ( rm_ss, rm_sp );
78 		memcpy_user ( rm_stack, 0, data, 0, size );
79 	}
80 	rm_sp += size;
81 };
82 
83 /**
84  * Set interrupt vector
85  *
86  * @v intr		Interrupt number
87  * @v vector		Interrupt vector, or NULL to disable
88  */
set_interrupt_vector(unsigned int intr,void * vector)89 void set_interrupt_vector ( unsigned int intr, void *vector ) {
90 	struct interrupt32_descriptor *idte32;
91 	struct interrupt64_descriptor *idte64;
92 	intptr_t addr = ( ( intptr_t ) vector );
93 
94 	/* Populate 32-bit interrupt descriptor */
95 	idte32 = &idt32[intr];
96 	idte32->segment = VIRTUAL_CS;
97 	idte32->attr = ( vector ? ( IDTE_PRESENT | IDTE_TYPE_IRQ32 ) : 0 );
98 	idte32->low = ( addr >> 0 );
99 	idte32->high = ( addr >> 16 );
100 
101 	/* Populate 64-bit interrupt descriptor, if applicable */
102 	if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
103 		idte64 = &idt64[intr];
104 		idte64->segment = LONG_CS;
105 		idte64->attr = ( vector ?
106 				 ( IDTE_PRESENT | IDTE_TYPE_IRQ64 ) : 0 );
107 		idte64->low = ( addr >> 0 );
108 		idte64->mid = ( addr >> 16 );
109 		idte64->high = ( ( ( uint64_t ) addr ) >> 32 );
110 	}
111 }
112 
113 /**
114  * Initialise interrupt descriptor table
115  *
116  */
init_idt(void)117 void init_idt ( void ) {
118 	struct interrupt_vector *vec;
119 	unsigned int intr;
120 
121 	/* Initialise the interrupt descriptor table and interrupt vectors */
122 	for ( intr = 0 ; intr < NUM_INT ; intr++ ) {
123 		vec = &intr_vec[intr];
124 		vec->push = PUSH_INSN;
125 		vec->movb = MOVB_INSN;
126 		vec->intr = intr;
127 		vec->jmp = JMP_INSN;
128 		vec->offset = ( ( intptr_t ) interrupt_wrapper -
129 				( intptr_t ) vec->next );
130 		set_interrupt_vector ( intr, vec );
131 	}
132 	DBGC ( &intr_vec[0], "INTn vector at %p+%zxn (phys %#lx+%zxn)\n",
133 	       intr_vec, sizeof ( intr_vec[0] ),
134 	       virt_to_phys ( intr_vec ), sizeof ( intr_vec[0] ) );
135 
136 	/* Initialise the 32-bit interrupt descriptor table register */
137 	idtr32.base = virt_to_phys ( idt32 );
138 
139 	/* Initialise the 64-bit interrupt descriptor table register,
140 	 * if applicable.
141 	 */
142 	if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) )
143 		idtr64.base = virt_to_phys ( idt64 );
144 }
145 
146 /**
147  * Determine interrupt profiler (for debugging)
148  *
149  * @v intr		Interrupt number
150  * @ret profiler	Profiler
151  */
interrupt_profiler(int intr)152 static struct profiler * interrupt_profiler ( int intr ) {
153 
154 	switch ( intr ) {
155 	case IRQ_INT ( 0 ) :
156 		return &timer_irq_profiler;
157 	default:
158 		return &other_irq_profiler;
159 	}
160 }
161 
/**
 * Interrupt handler
 *
 * Reissues the interrupt in real mode so that the original real-mode
 * handler (e.g. the BIOS) gets to service it.
 *
 * @v intr		Interrupt number
 */
void __attribute__ (( regparm ( 1 ) )) interrupt ( int intr ) {
	struct profiler *profiler = interrupt_profiler ( intr );
	uint32_t discard_eax;

	/* Reissue interrupt in real mode.  The real-mode fragment is
	 * self-modifying: the "movb" patches the immediate operand of
	 * the "int $0x00" instruction (the byte at label 1 plus one)
	 * with the interrupt number passed in %al, since "int" takes
	 * only an immediate operand.  %eax is listed as an output so
	 * the compiler knows it is clobbered.
	 */
	profile_start ( profiler );
	__asm__ __volatile__ ( REAL_CODE ( "movb %%al, %%cs:(1f + 1)\n\t"
					   "\n1:\n\t"
					   "int $0x00\n\t" )
			       : "=a" ( discard_eax ) : "0" ( intr ) );
	profile_stop ( profiler );
	/* Exclude interrupt servicing time from any enclosing profile */
	profile_exclude ( profiler );
}
180 
181 /**
182  * Map pages for I/O
183  *
184  * @v bus_addr		Bus address
185  * @v len		Length of region
186  * @ret io_addr		I/O address
187  */
ioremap_pages(unsigned long bus_addr,size_t len)188 static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
189 	unsigned long start;
190 	unsigned int count;
191 	unsigned int stride;
192 	unsigned int first;
193 	unsigned int i;
194 	size_t offset;
195 	void *io_addr;
196 
197 	DBGC ( &io_pages, "IO mapping %08lx+%zx\n", bus_addr, len );
198 
199 	/* Sanity check */
200 	if ( ! len )
201 		return NULL;
202 
203 	/* Round down start address to a page boundary */
204 	start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
205 	offset = ( bus_addr - start );
206 	assert ( offset < IO_PAGE_SIZE );
207 
208 	/* Calculate number of pages required */
209 	count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
210 	assert ( count != 0 );
211 	assert ( count < ( sizeof ( io_pages.page ) /
212 			   sizeof ( io_pages.page[0] ) ) );
213 
214 	/* Round up number of pages to a power of two */
215 	stride = ( 1 << ( fls ( count ) - 1 ) );
216 	assert ( count <= stride );
217 
218 	/* Allocate pages */
219 	for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
220 				    sizeof ( io_pages.page[0] ) ) ;
221 	      first += stride ) {
222 
223 		/* Calculate I/O address */
224 		io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );
225 
226 		/* Check that page table entries are available */
227 		for ( i = first ; i < ( first + count ) ; i++ ) {
228 			if ( io_pages.page[i] & PAGE_P ) {
229 				io_addr = NULL;
230 				break;
231 			}
232 		}
233 		if ( ! io_addr )
234 			continue;
235 
236 		/* Create page table entries */
237 		for ( i = first ; i < ( first + count ) ; i++ ) {
238 			io_pages.page[i] = ( start | PAGE_P | PAGE_RW |
239 					     PAGE_US | PAGE_PWT | PAGE_PCD |
240 					     PAGE_PS );
241 			start += IO_PAGE_SIZE;
242 		}
243 
244 		/* Mark last page as being the last in this allocation */
245 		io_pages.page[ i - 1 ] |= PAGE_LAST;
246 
247 		/* Return I/O address */
248 		DBGC ( &io_pages, "IO mapped %08lx+%zx to %p using PTEs "
249 		       "[%d-%d]\n", bus_addr, len, io_addr, first,
250 		       ( first + count - 1 ) );
251 		return io_addr;
252 	}
253 
254 	DBGC ( &io_pages, "IO could not map %08lx+%zx\n", bus_addr, len );
255 	return NULL;
256 }
257 
/**
 * Unmap pages for I/O
 *
 * Clears the run of page table entries created by ioremap_pages(),
 * using the PAGE_LAST marker to find the end of the allocation.
 *
 * @v io_addr		I/O address (as returned by ioremap_pages())
 */
static void iounmap_pages ( volatile const void *io_addr ) {
	volatile const void *invalidate = io_addr;
	unsigned int first;
	unsigned int i;
	int is_last;

	DBGC ( &io_pages, "IO unmapping %p\n", io_addr );

	/* Calculate first page table entry.  (Arithmetic on void
	 * pointers is a GCC extension treating sizeof(void) as 1.)
	 */
	first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );

	/* Clear page table entries */
	for ( i = first ; ; i++ ) {

		/* Sanity check: entry must have been mapped */
		assert ( io_pages.page[i] & PAGE_P );

		/* Check if this is the last page in this allocation
		 * (must be read before the entry is cleared)
		 */
		is_last = ( io_pages.page[i] & PAGE_LAST );

		/* Clear page table entry */
		io_pages.page[i] = 0;

		/* Invalidate TLB for this page */
		__asm__ __volatile__ ( "invlpg (%0)" : : "r" ( invalidate ) );
		invalidate += IO_PAGE_SIZE;

		/* Terminate if this was the last page */
		if ( is_last )
			break;
	}

	DBGC ( &io_pages, "IO unmapped %p using PTEs [%d-%d]\n",
	       io_addr, first, i );
}
298 
/* Register librm's inline implementations of the user-access API,
 * and the page-based implementations of the I/O mapping API.
 */
PROVIDE_UACCESS_INLINE ( librm, phys_to_user );
PROVIDE_UACCESS_INLINE ( librm, user_to_phys );
PROVIDE_UACCESS_INLINE ( librm, virt_to_user );
PROVIDE_UACCESS_INLINE ( librm, user_to_virt );
PROVIDE_UACCESS_INLINE ( librm, userptr_add );
PROVIDE_UACCESS_INLINE ( librm, memcpy_user );
PROVIDE_UACCESS_INLINE ( librm, memmove_user );
PROVIDE_UACCESS_INLINE ( librm, memset_user );
PROVIDE_UACCESS_INLINE ( librm, strlen_user );
PROVIDE_UACCESS_INLINE ( librm, memchr_user );
PROVIDE_IOMAP_INLINE ( pages, io_to_bus );
PROVIDE_IOMAP ( pages, ioremap, ioremap_pages );
PROVIDE_IOMAP ( pages, iounmap, iounmap_pages );
312