1 /*
2 Linux Real Mode Interface - A library of DPMI-like functions for Linux.
3
4 Copyright (C) 1998 by Josh Vanderhoof
5
6 You are free to distribute and modify this file, as long as you
7 do not remove this copyright notice and clearly label modified
8 versions as being modified.
9
10 This software has NO WARRANTY. Use it at your own risk.
11 */
12
13 #include <stdio.h>
14 #include <string.h>
15 #include <asm/vm86.h>
16
17 #ifdef USE_LIBC_VM86
18 #include <sys/vm86.h>
19 #endif
20
21 #include <sys/types.h>
22 #include <sys/stat.h>
23 #include <sys/mman.h>
24 #include <unistd.h>
25 #include <fcntl.h>
26
27 #include "lrmi.h"
28
29 #define REAL_MEM_BASE ((void *)0x10000)
30 #define REAL_MEM_SIZE 0x10000
31 #define REAL_MEM_BLOCKS 0x100
32
/* CPU type handed to the vm86 context in LRMI_init(); defaults to 386. */
int __svgalib_lrmi_cpu_type=CPU_386;
34
/* One entry in the real-mode heap's block table (blocks are address-ordered). */
struct mem_block
{
	unsigned int size : 20;	/* block length in bytes; 20 bits covers the 64 KB heap */
	unsigned int free : 1;	/* nonzero if this block is unallocated */
};
40
/*
 Bookkeeping for the 64 KB real-mode heap mapped at REAL_MEM_BASE.
 blocks[] entries are kept sorted by address; consecutive entries
 describe consecutive memory, so an address is found by summing sizes.
*/
static struct
{
	int ready;	/* nonzero once real_mem_init() has mapped the heap */
	int count;	/* number of live entries in blocks[] */
	struct mem_block blocks[REAL_MEM_BLOCKS];
} mem_info = { 0 };
47
48 static int
real_mem_init(void)49 real_mem_init(void)
50 {
51 void *m;
52 int fd_zero;
53
54 if (mem_info.ready)
55 return 1;
56
57 fd_zero = open("/dev/zero", O_RDONLY);
58 if (fd_zero == -1)
59 {
60 perror("open /dev/zero");
61 return 0;
62 }
63
64 m = mmap((void *)REAL_MEM_BASE, REAL_MEM_SIZE,
65 PROT_READ | PROT_WRITE | PROT_EXEC,
66 MAP_FIXED | MAP_PRIVATE, fd_zero, 0);
67
68 if (m == (void *)-1)
69 {
70 perror("mmap /dev/zero");
71 close(fd_zero);
72 return 0;
73 }
74
75 mem_info.ready = 1;
76 mem_info.count = 1;
77 mem_info.blocks[0].size = REAL_MEM_SIZE;
78 mem_info.blocks[0].free = 1;
79
80 return 1;
81 }
82
83
84 static void
insert_block(int i)85 insert_block(int i)
86 {
87 memmove(
88 mem_info.blocks + i + 1,
89 mem_info.blocks + i,
90 (mem_info.count - i) * sizeof(struct mem_block));
91
92 mem_info.count++;
93 }
94
95 static void
delete_block(int i)96 delete_block(int i)
97 {
98 mem_info.count--;
99
100 memmove(
101 mem_info.blocks + i,
102 mem_info.blocks + i + 1,
103 (mem_info.count - i) * sizeof(struct mem_block));
104 }
105
106 void *
LRMI_alloc_real(int size)107 LRMI_alloc_real(int size)
108 {
109 int i;
110 char *r = (char *)REAL_MEM_BASE;
111
112 if (!mem_info.ready)
113 return NULL;
114
115 if (mem_info.count == REAL_MEM_BLOCKS)
116 return NULL;
117
118 size = (size + 15) & ~15;
119
120 for (i = 0; i < mem_info.count; i++)
121 {
122 if (mem_info.blocks[i].free && size < mem_info.blocks[i].size)
123 {
124 insert_block(i);
125
126 mem_info.blocks[i].size = size;
127 mem_info.blocks[i].free = 0;
128 mem_info.blocks[i + 1].size -= size;
129
130 return (void *)r;
131 }
132
133 r += mem_info.blocks[i].size;
134 }
135
136 return NULL;
137 }
138
139
140 void
LRMI_free_real(void * m)141 LRMI_free_real(void *m)
142 {
143 int i;
144 char *r = (char *)REAL_MEM_BASE;
145
146 if (!mem_info.ready)
147 return;
148
149 i = 0;
150 while (m != (void *)r)
151 {
152 r += mem_info.blocks[i].size;
153 i++;
154 if (i == mem_info.count)
155 return;
156 }
157
158 mem_info.blocks[i].free = 1;
159
160 if (i + 1 < mem_info.count && mem_info.blocks[i + 1].free)
161 {
162 mem_info.blocks[i].size += mem_info.blocks[i + 1].size;
163 delete_block(i + 1);
164 }
165
166 if (i - 1 >= 0 && mem_info.blocks[i - 1].free)
167 {
168 mem_info.blocks[i - 1].size += mem_info.blocks[i].size;
169 delete_block(i);
170 }
171 }
172
173
#define DEFAULT_VM86_FLAGS (IF_MASK | IOPL_MASK)	/* interrupts enabled, IOPL set */
#define DEFAULT_STACK_SIZE 0x1000	/* real-mode stack allocated by LRMI_init() */
#define RETURN_TO_32_INT 255	/* int vector used by the stub to leave vm86 mode */

/* State for the single vm86 execution context. */
static struct
{
	int ready;	/* nonzero once LRMI_init() has completed */
	unsigned short ret_seg, ret_off;	/* real-mode address of the "int 255" stub */
	unsigned short stack_seg, stack_off;	/* default stack; grows down from stack_off */
	struct vm86_struct vm;	/* register/state block passed to the vm86() syscall */
} context = { 0 };
185
186
/* Set bit number `bit` in the byte array at `array` (little-endian bit order). */
static inline void
set_bit(unsigned int bit, void *array)
{
	unsigned char *bytes = array;

	bytes[bit >> 3] |= (unsigned char)(1u << (bit & 7));
}
194
195
/*
 Read the segment half (high word) of real-mode interrupt vector i from
 the IVT, which LRMI_init() maps at linear address 0.
*/
static inline unsigned int
get_int_seg(int i)
{
	return *(unsigned short *)(i * 4 + 2);
}
201
202
/*
 Read the offset half (low word) of real-mode interrupt vector i from
 the IVT mapped at linear address 0.
*/
static inline unsigned int
get_int_off(int i)
{
	return *(unsigned short *)(i * 4);
}
208
209
210 static inline void
pushw(unsigned short i)211 pushw(unsigned short i)
212 {
213 struct vm86_regs *r = &context.vm.regs;
214 r->esp -= 2;
215 *(unsigned short *)(((unsigned int)r->ss << 4) + r->esp) = i;
216 }
217
218
219 int
LRMI_init(void)220 LRMI_init(void)
221 {
222 void *m;
223 int fd_mem;
224
225 if (context.ready)
226 return 1;
227
228 if (!real_mem_init())
229 return 0;
230
231 /*
232 Map the Interrupt Vectors (0x0 - 0x400) + BIOS data (0x400 - 0x502)
233 and the ROM (0xa0000 - 0x100000)
234 */
235 fd_mem = open("/dev/mem", O_RDWR);
236
237 if (fd_mem == -1)
238 {
239 perror("open /dev/mem");
240 return 0;
241 }
242
243 m = mmap((void *)0, 0x502,
244 PROT_READ | PROT_WRITE | PROT_EXEC,
245 MAP_FIXED | MAP_PRIVATE, fd_mem, 0);
246
247 if (m == (void *)-1)
248 {
249 perror("mmap /dev/mem");
250 return 0;
251 }
252
253 m = mmap((void *)0xa0000, 0x100000 - 0xa0000,
254 PROT_READ | PROT_WRITE,
255 MAP_FIXED | MAP_SHARED, fd_mem, 0xa0000);
256
257 if (m == (void *)-1)
258 {
259 perror("mmap /dev/mem");
260 return 0;
261 }
262
263
264 /*
265 Allocate a stack
266 */
267 m = LRMI_alloc_real(DEFAULT_STACK_SIZE);
268
269 context.stack_seg = (unsigned int)m >> 4;
270 context.stack_off = DEFAULT_STACK_SIZE;
271
272 /*
273 Allocate the return to 32 bit routine
274 */
275 m = LRMI_alloc_real(2);
276
277 context.ret_seg = (unsigned int)m >> 4;
278 context.ret_off = (unsigned int)m & 0xf;
279
280 ((unsigned char *)m)[0] = 0xcd; /* int opcode */
281 ((unsigned char *)m)[1] = RETURN_TO_32_INT;
282
283 memset(&context.vm, 0, sizeof(context.vm));
284
285 context.vm.cpu_type = __svgalib_lrmi_cpu_type;
286
287 /*
288 Enable kernel emulation of all ints except RETURN_TO_32_INT
289 */
290 memset(&context.vm.int_revectored, 0, sizeof(context.vm.int_revectored));
291 set_bit(RETURN_TO_32_INT, &context.vm.int_revectored);
292
293 context.ready = 1;
294
295 return 1;
296 }
297
298
299 static void
set_regs(struct LRMI_regs * r)300 set_regs(struct LRMI_regs *r)
301 {
302 context.vm.regs.edi = r->edi;
303 context.vm.regs.esi = r->esi;
304 context.vm.regs.ebp = r->ebp;
305 context.vm.regs.ebx = r->ebx;
306 context.vm.regs.edx = r->edx;
307 context.vm.regs.ecx = r->ecx;
308 context.vm.regs.eax = r->eax;
309 context.vm.regs.eflags = DEFAULT_VM86_FLAGS;
310 context.vm.regs.es = r->es;
311 context.vm.regs.ds = r->ds;
312 context.vm.regs.fs = r->fs;
313 context.vm.regs.gs = r->gs;
314 }
315
316
317 static void
get_regs(struct LRMI_regs * r)318 get_regs(struct LRMI_regs *r)
319 {
320 r->edi = context.vm.regs.edi;
321 r->esi = context.vm.regs.esi;
322 r->ebp = context.vm.regs.ebp;
323 r->ebx = context.vm.regs.ebx;
324 r->edx = context.vm.regs.edx;
325 r->ecx = context.vm.regs.ecx;
326 r->eax = context.vm.regs.eax;
327 r->flags = context.vm.regs.eflags;
328 r->es = context.vm.regs.es;
329 r->ds = context.vm.regs.ds;
330 r->fs = context.vm.regs.fs;
331 r->gs = context.vm.regs.gs;
332 }
333
#define DIRECTION_FLAG (1 << 10)	/* DF bit in EFLAGS */

/*
 Emulate one INS instruction of the given operand size (1, 2 or 4 bytes):
 read the port in DX and store through the context's DS:DI, honoring the
 direction flag.  The low 16 bits of EDI are updated afterwards.
 NOTE(review): hardware INS writes through ES:DI, but this code uses DS
 as the segment base — confirm this matches the real-mode code it serves.
*/
static void
em_ins(int size)
{
	unsigned int edx, edi;

	edx = context.vm.regs.edx & 0xffff;
	edi = context.vm.regs.edi & 0xffff;
	edi += (unsigned int)context.vm.regs.ds << 4;	/* linear destination address */

	if (context.vm.regs.eflags & DIRECTION_FLAG)
	{
		/* DF set: set it for the real instruction, then restore with cld */
		if (size == 4)
			asm volatile ("std; insl; cld"
			 : "=D" (edi) : "d" (edx), "0" (edi));
		else if (size == 2)
			asm volatile ("std; insw; cld"
			 : "=D" (edi) : "d" (edx), "0" (edi));
		else
			asm volatile ("std; insb; cld"
			 : "=D" (edi) : "d" (edx), "0" (edi));
	}
	else
	{
		if (size == 4)
			asm volatile ("cld; insl"
			 : "=D" (edi) : "d" (edx), "0" (edi));
		else if (size == 2)
			asm volatile ("cld; insw"
			 : "=D" (edi) : "d" (edx), "0" (edi));
		else
			asm volatile ("cld; insb"
			 : "=D" (edi) : "d" (edx), "0" (edi));
	}

	edi -= (unsigned int)context.vm.regs.ds << 4;	/* back to a 16-bit offset */

	context.vm.regs.edi &= 0xffff0000;
	context.vm.regs.edi |= edi & 0xffff;
}
375
/*
 Emulate REP INS of the given operand size: CX repetitions of a port
 read from DX stored through DS:DI, honoring the direction flag.
 The low 16 bits of EDI and ECX are updated afterwards.
 NOTE(review): as with em_ins(), DS is used where hardware uses ES.
*/
static void
em_rep_ins(int size)
{
	unsigned int ecx, edx, edi;

	ecx = context.vm.regs.ecx & 0xffff;
	edx = context.vm.regs.edx & 0xffff;
	edi = context.vm.regs.edi & 0xffff;
	edi += (unsigned int)context.vm.regs.ds << 4;	/* linear destination address */

	if (context.vm.regs.eflags & DIRECTION_FLAG)
	{
		if (size == 4)
			asm volatile ("std; rep; insl; cld"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
		else if (size == 2)
			asm volatile ("std; rep; insw; cld"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
		else
			asm volatile ("std; rep; insb; cld"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
	}
	else
	{
		if (size == 4)
			asm volatile ("cld; rep; insl"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
		else if (size == 2)
			asm volatile ("cld; rep; insw"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
		else
			asm volatile ("cld; rep; insb"
			 : "=D" (edi), "=c" (ecx)
			 : "d" (edx), "0" (edi), "1" (ecx));
	}

	edi -= (unsigned int)context.vm.regs.ds << 4;	/* back to a 16-bit offset */

	context.vm.regs.edi &= 0xffff0000;
	context.vm.regs.edi |= edi & 0xffff;

	context.vm.regs.ecx &= 0xffff0000;
	context.vm.regs.ecx |= ecx & 0xffff;
}
425
/*
 Emulate one OUTS instruction of the given operand size: read from the
 context's DS:SI and write to the port in DX, honoring the direction
 flag.  The low 16 bits of ESI are updated afterwards.
*/
static void
em_outs(int size)
{
	unsigned int edx, esi;

	edx = context.vm.regs.edx & 0xffff;
	esi = context.vm.regs.esi & 0xffff;
	esi += (unsigned int)context.vm.regs.ds << 4;	/* linear source address */

	if (context.vm.regs.eflags & DIRECTION_FLAG)
	{
		if (size == 4)
			asm volatile ("std; outsl; cld"
			 : "=S" (esi) : "d" (edx), "0" (esi));
		else if (size == 2)
			asm volatile ("std; outsw; cld"
			 : "=S" (esi) : "d" (edx), "0" (esi));
		else
			asm volatile ("std; outsb; cld"
			 : "=S" (esi) : "d" (edx), "0" (esi));
	}
	else
	{
		if (size == 4)
			asm volatile ("cld; outsl"
			 : "=S" (esi) : "d" (edx), "0" (esi));
		else if (size == 2)
			asm volatile ("cld; outsw"
			 : "=S" (esi) : "d" (edx), "0" (esi));
		else
			asm volatile ("cld; outsb"
			 : "=S" (esi) : "d" (edx), "0" (esi));
	}

	esi -= (unsigned int)context.vm.regs.ds << 4;	/* back to a 16-bit offset */

	context.vm.regs.esi &= 0xffff0000;
	context.vm.regs.esi |= esi & 0xffff;
}
465
/*
 Emulate REP OUTS of the given operand size: CX repetitions of a write
 from DS:SI to the port in DX, honoring the direction flag.
 The low 16 bits of ESI and ECX are updated afterwards.
*/
static void
em_rep_outs(int size)
{
	unsigned int ecx, edx, esi;

	ecx = context.vm.regs.ecx & 0xffff;
	edx = context.vm.regs.edx & 0xffff;
	esi = context.vm.regs.esi & 0xffff;
	esi += (unsigned int)context.vm.regs.ds << 4;	/* linear source address */

	if (context.vm.regs.eflags & DIRECTION_FLAG)
	{
		if (size == 4)
			asm volatile ("std; rep; outsl; cld"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
		else if (size == 2)
			asm volatile ("std; rep; outsw; cld"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
		else
			asm volatile ("std; rep; outsb; cld"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
	}
	else
	{
		if (size == 4)
			asm volatile ("cld; rep; outsl"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
		else if (size == 2)
			asm volatile ("cld; rep; outsw"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
		else
			asm volatile ("cld; rep; outsb"
			 : "=S" (esi), "=c" (ecx)
			 : "d" (edx), "0" (esi), "1" (ecx));
	}

	esi -= (unsigned int)context.vm.regs.ds << 4;	/* back to a 16-bit offset */

	context.vm.regs.esi &= 0xffff0000;
	context.vm.regs.esi |= esi & 0xffff;

	context.vm.regs.ecx &= 0xffff0000;
	context.vm.regs.ecx |= ecx & 0xffff;
}
515
/* Emulate INB: read one byte from port DX into AL of the saved context. */
static void
em_inb(void)
{
	asm volatile ("inb (%w1), %b0"
	 : "=a" (context.vm.regs.eax)
	 : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
}
523
/* Emulate INW: read one word from port DX into AX of the saved context. */
static void
em_inw(void)
{
	asm volatile ("inw (%w1), %w0"
	 : "=a" (context.vm.regs.eax)
	 : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
}
531
/* Emulate INL: read one dword from port DX into EAX of the saved context. */
static void
em_inl(void)
{
	asm volatile ("inl (%w1), %0"
	 : "=a" (context.vm.regs.eax)
	 : "d" (context.vm.regs.edx));
}
539
/* Emulate OUTB: write AL of the saved context to port DX. */
static void
em_outb(void)
{
	asm volatile ("outb %b0, (%w1)"
	 : : "a" (context.vm.regs.eax),
	 "d" (context.vm.regs.edx));
}
547
/* Emulate OUTW: write AX of the saved context to port DX. */
static void
em_outw(void)
{
	asm volatile ("outw %w0, (%w1)"
	 : : "a" (context.vm.regs.eax),
	 "d" (context.vm.regs.edx));
}
555
/* Emulate OUTL: write EAX of the saved context to port DX. */
static void
em_outl(void)
{
	asm volatile ("outl %0, (%w1)"
	 : : "a" (context.vm.regs.eax),
	 "d" (context.vm.regs.edx));
}
563
/*
 Software-emulate the I/O instruction the vm86 monitor refused to handle
 (VM86_UNKNOWN).  Decodes prefix bytes at the context's CS:IP, performs
 the port I/O via the em_* helpers, and advances EIP past the whole
 instruction.  Returns 1 on success, 0 if the opcode at CS:IP is not an
 I/O instruction we know how to emulate (EIP is left unchanged then).
*/
static int
emulate(void)
{
	unsigned char *insn;
	struct
	{
		unsigned int size : 1;	/* 0x66 operand-size override seen */
		unsigned int rep : 1;	/* 0xf3 REP prefix seen */
	} prefix = { 0, 0 };
	int i = 0;	/* bytes consumed so far */

	insn = (unsigned char *)((unsigned int)context.vm.regs.cs << 4);
	insn += context.vm.regs.eip;

	while (1)
	{
		if (insn[i] == 0x66)
		{
			/* operand-size override; a second 0x66 toggles it back */
			prefix.size = 1 - prefix.size;
			i++;
		}
		else if (insn[i] == 0xf3)
		{
			/* REP prefix */
			prefix.rep = 1;
			i++;
		}
		else if (insn[i] == 0xf0 || insn[i] == 0xf2
		 || insn[i] == 0x26 || insn[i] == 0x2e
		 || insn[i] == 0x36 || insn[i] == 0x3e
		 || insn[i] == 0x64 || insn[i] == 0x65
		 || insn[i] == 0x67)
		{
			/* these prefixes are just ignored */
			i++;
		}
		else if (insn[i] == 0x6c)
		{
			/* INSB */
			if (prefix.rep)
				em_rep_ins(1);
			else
				em_ins(1);
			i++;
			break;
		}
		else if (insn[i] == 0x6d)
		{
			/* INSW / INSD (0x66 prefix selects 32-bit operand) */
			if (prefix.rep)
			{
				if (prefix.size)
					em_rep_ins(4);
				else
					em_rep_ins(2);
			}
			else
			{
				if (prefix.size)
					em_ins(4);
				else
					em_ins(2);
			}
			i++;
			break;
		}
		else if (insn[i] == 0x6e)
		{
			/* OUTSB */
			if (prefix.rep)
				em_rep_outs(1);
			else
				em_outs(1);
			i++;
			break;
		}
		else if (insn[i] == 0x6f)
		{
			/* OUTSW / OUTSD */
			if (prefix.rep)
			{
				if (prefix.size)
					em_rep_outs(4);
				else
					em_rep_outs(2);
			}
			else
			{
				if (prefix.size)
					em_outs(4);
				else
					em_outs(2);
			}
			i++;
			break;
		}
		else if (insn[i] == 0xec)
		{
			/* IN AL, DX */
			em_inb();
			i++;
			break;
		}
		else if (insn[i] == 0xed)
		{
			/* IN AX/EAX, DX */
			if (prefix.size)
				em_inl();
			else
				em_inw();
			i++;
			break;
		}
		else if (insn[i] == 0xee)
		{
			/* OUT DX, AL */
			em_outb();
			i++;
			break;
		}
		else if (insn[i] == 0xef)
		{
			/* OUT DX, AX/EAX */
			if (prefix.size)
				em_outl();
			else
				em_outw();

			i++;
			break;
		}
		else
			return 0;	/* not an I/O instruction we can emulate */
	}

	/* skip the emulated instruction (prefixes + opcode) */
	context.vm.regs.eip += i;
	return 1;
}
693
694
695 /*
696 I don't know how to make sure I get the right vm86() from libc.
697 The one I want is syscall # 113 (vm86old() in libc 5, vm86() in glibc)
698 which should be declared as "int vm86(struct vm86_struct *);" in
699 <sys/vm86.h>.
700
701 This just does syscall 113 with inline asm, which should work
702 for both libc's (I hope).
703 */
#if !defined(USE_LIBC_VM86)
/*
 Invoke syscall 113 (vm86old in libc 5, vm86 in glibc) directly with
 inline asm — see the comment above for why the libc wrapper is avoided.
 Returns the raw syscall result.
*/
static int
lrmi_vm86(struct vm86_struct *vm)
{
	int r;
#ifdef __PIC__
	/* %ebx is the PIC register, so save/restore it around the syscall */
	asm volatile (
	 "pushl %%ebx\n\t"
	 "movl %2, %%ebx\n\t"
	 "int $0x80\n\t"
	 "popl %%ebx"
	 : "=a" (r)
	 : "0" (113), "r" (vm));
#else
	asm volatile (
	 "int $0x80"
	 : "=a" (r)
	 : "0" (113), "b" (vm));
#endif
	return r;
}
#else
#define lrmi_vm86 vm86
#endif
728
729
730 static void
debug_info(int vret)731 debug_info(int vret)
732 {
733 int i;
734 unsigned char *p;
735
736 fputs("vm86() failed\n", stderr);
737 fprintf(stderr, "return = 0x%x\n", vret);
738 fprintf(stderr, "eax = 0x%08lx\n", context.vm.regs.eax);
739 fprintf(stderr, "ebx = 0x%08lx\n", context.vm.regs.ebx);
740 fprintf(stderr, "ecx = 0x%08lx\n", context.vm.regs.ecx);
741 fprintf(stderr, "edx = 0x%08lx\n", context.vm.regs.edx);
742 fprintf(stderr, "esi = 0x%08lx\n", context.vm.regs.esi);
743 fprintf(stderr, "edi = 0x%08lx\n", context.vm.regs.edi);
744 fprintf(stderr, "ebp = 0x%08lx\n", context.vm.regs.ebp);
745 fprintf(stderr, "eip = 0x%08lx\n", context.vm.regs.eip);
746 fprintf(stderr, "cs = 0x%04x\n", context.vm.regs.cs);
747 fprintf(stderr, "esp = 0x%08lx\n", context.vm.regs.esp);
748 fprintf(stderr, "ss = 0x%04x\n", context.vm.regs.ss);
749 fprintf(stderr, "ds = 0x%04x\n", context.vm.regs.ds);
750 fprintf(stderr, "es = 0x%04x\n", context.vm.regs.es);
751 fprintf(stderr, "fs = 0x%04x\n", context.vm.regs.fs);
752 fprintf(stderr, "gs = 0x%04x\n", context.vm.regs.gs);
753 fprintf(stderr, "eflags = 0x%08lx\n", context.vm.regs.eflags);
754
755 fputs("cs:ip = [ ", stderr);
756
757 p = (unsigned char *)((context.vm.regs.cs << 4) + (context.vm.regs.eip & 0xffff));
758
759 for (i = 0; i < 16; ++i)
760 fprintf(stderr, "%02x ", (unsigned int)p[i]);
761
762 fputs("]\n", stderr);
763 }
764
765
/*
 Run the vm86 context until the real-mode code returns through the
 RETURN_TO_32_INT stub.  Software interrupts (only RETURN_TO_32_INT is
 revectored to us; see LRMI_init) are reflected back into real mode via
 the IVT; VM86_UNKNOWN faults are handed to emulate() for direct port
 I/O.  Returns 1 on a clean return, 0 on failure (after dumping state).
*/
static int
run_vm86(void)
{
	unsigned int vret;

	while (1)
	{
		vret = lrmi_vm86(&context.vm);

		if (VM86_TYPE(vret) == VM86_INTx)
		{
			unsigned int v = VM86_ARG(vret);

			if (v == RETURN_TO_32_INT)
				return 1;	/* real-mode code hit our return stub */

			/* Simulate the int: push a return frame, jump via the IVT. */
			pushw(context.vm.regs.eflags);
			pushw(context.vm.regs.cs);
			pushw(context.vm.regs.eip);

			context.vm.regs.cs = get_int_seg(v);
			context.vm.regs.eip = get_int_off(v);
			/* clear virtual-interrupt and trap flags for the handler */
			context.vm.regs.eflags &= ~(VIF_MASK | TF_MASK);

			continue;
		}

		if (VM86_TYPE(vret) != VM86_UNKNOWN)
			break;	/* unexpected exit reason */

		if (!emulate())
			break;	/* instruction we can't emulate */
	}

	debug_info(vret);

	return 0;
}
804
805
806 int
LRMI_call(struct LRMI_regs * r)807 LRMI_call(struct LRMI_regs *r)
808 {
809 unsigned int vret;
810
811 memset(&context.vm.regs, 0, sizeof(context.vm.regs));
812
813 set_regs(r);
814
815 context.vm.regs.cs = r->cs;
816 context.vm.regs.eip = r->ip;
817
818 if (r->ss == 0 && r->sp == 0)
819 {
820 context.vm.regs.ss = context.stack_seg;
821 context.vm.regs.esp = context.stack_off;
822 }
823 else
824 {
825 context.vm.regs.ss = r->ss;
826 context.vm.regs.esp = r->sp;
827 }
828
829 pushw(context.ret_seg);
830 pushw(context.ret_off);
831
832 vret = run_vm86();
833
834 get_regs(r);
835
836 return vret;
837 }
838
839
840 int
LRMI_int(int i,struct LRMI_regs * r)841 LRMI_int(int i, struct LRMI_regs *r)
842 {
843 unsigned int vret;
844 unsigned int seg, off;
845
846 seg = get_int_seg(i);
847 off = get_int_off(i);
848
849 /*
850 If the interrupt is in regular memory, it's probably
851 still pointing at a dos TSR (which is now gone).
852 */
853 if (seg < 0xa000 || (seg << 4) + off >= 0x100000)
854 {
855 fprintf(stderr, "Int 0x%x is not in rom (%04x:%04x)\n", i, seg, off);
856 return 0;
857 }
858
859 memset(&context.vm.regs, 0, sizeof(context.vm.regs));
860
861 set_regs(r);
862
863 context.vm.regs.cs = seg;
864 context.vm.regs.eip = off;
865
866 if (r->ss == 0 && r->sp == 0)
867 {
868 context.vm.regs.ss = context.stack_seg;
869 context.vm.regs.esp = context.stack_off;
870 }
871 else
872 {
873 context.vm.regs.ss = r->ss;
874 context.vm.regs.esp = r->sp;
875 }
876
877 pushw(DEFAULT_VM86_FLAGS);
878 pushw(context.ret_seg);
879 pushw(context.ret_off);
880
881 vret = run_vm86();
882
883 get_regs(r);
884
885 return vret;
886 }
887
888