1 // Code for manipulating stack locations.
2 //
3 // Copyright (C) 2009-2015  Kevin O'Connor <kevin@koconnor.net>
4 //
5 // This file may be distributed under the terms of the GNU LGPLv3 license.
6 
7 #include "biosvar.h" // GET_GLOBAL
8 #include "bregs.h" // CR0_PE
9 #include "fw/paravirt.h" // PORT_SMI_CMD
10 #include "hw/rtc.h" // rtc_use
11 #include "list.h" // hlist_node
12 #include "malloc.h" // free
13 #include "output.h" // dprintf
14 #include "romfile.h" // romfile_loadint
15 #include "stacks.h" // struct mutex_s
16 #include "string.h" // memset
17 #include "util.h" // useRTC
18 
19 #define MAIN_STACK_MAX (1024*1024)
20 
21 
22 /****************************************************************
23  * 16bit / 32bit calling
24  ****************************************************************/
25 
// State backed up by call32_prep() so that a later call16() /
// call32_post() can restore the original 16bit execution environment.
// Stored in the "low" memory segment so it is reachable from both
// 16bit and 32bit code.
struct {
    u8 method;            // C16_BIG / C16_SMM (0 when no backup active)
    u8 cmosindex;         // Saved CMOS index port value (holds nmi disable bit)
    u8 a20;               // Previous a20 gate setting
    u16 ss, fs, gs;       // Saved segment registers
    u32 cr0;              // Saved control register 0
    struct descloc_s gdt; // Saved gdt location
} Call16Data VARLOW;

// Values for Call16Data.method
#define C16_BIG 1
#define C16_SMM 2

// Set if an SMM trampoline is available for 16bit<->32bit transitions
int HaveSmmCall32 VARFSEG;
39 
// Backup state in preparation for call32.  Returns zero on success,
// negative if a 32bit transition is not possible (already in 16bit
// protected mode).
static int
call32_prep(u8 method)
{
    if (!CONFIG_CALL32_SMM || method != C16_SMM) {
        // Backup cr0
        u32 cr0 = cr0_read();
        if (cr0 & CR0_PE)
            // Called in 16bit protected mode?!
            return -1;
        SET_LOW(Call16Data.cr0, cr0);

        // Backup fs/gs and gdt
        SET_LOW(Call16Data.fs, GET_SEG(FS));
        SET_LOW(Call16Data.gs, GET_SEG(GS));
        struct descloc_s gdt;
        sgdt(&gdt);
        SET_LOW(Call16Data.gdt.length, gdt.length);
        SET_LOW(Call16Data.gdt.addr, gdt.addr);

        // Enable a20 and backup its previous state
        SET_LOW(Call16Data.a20, set_a20(1));
    }

    // Backup ss
    SET_LOW(Call16Data.ss, GET_SEG(SS));

    // Backup cmos index register and disable nmi
    u8 cmosindex = inb(PORT_CMOS_INDEX);
    if (!(cmosindex & NMI_DISABLE_BIT)) {
        outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
        inb(PORT_CMOS_DATA);
    }
    SET_LOW(Call16Data.cmosindex, cmosindex);

    SET_LOW(Call16Data.method, method);
    return 0;
}
78 
// Restore state backed up during call32.  Returns the transition
// method that had been recorded (C16_BIG / C16_SMM / 0) and clears
// the backup.
static u8
call32_post(void)
{
    u8 method = GET_LOW(Call16Data.method);
    SET_LOW(Call16Data.method, 0);
    SET_LOW(Call16Data.ss, 0);

    if (!CONFIG_CALL32_SMM || method != C16_SMM) {
        // Restore a20 (only if it was previously off)
        u8 a20 = GET_LOW(Call16Data.a20);
        if (!a20)
            set_a20(0);

        // Restore gdt and fs/gs
        struct descloc_s gdt;
        gdt.length = GET_LOW(Call16Data.gdt.length);
        gdt.addr = GET_LOW(Call16Data.gdt.addr);
        lgdt(&gdt);
        SET_SEG(FS, GET_LOW(Call16Data.fs));
        SET_SEG(GS, GET_LOW(Call16Data.gs));

        // Restore cr0 cache control bits (CD/NW) if they had been set
        u32 cr0_caching = GET_LOW(Call16Data.cr0) & (CR0_CD|CR0_NW);
        if (cr0_caching)
            cr0_mask(CR0_CD|CR0_NW, cr0_caching);
    }

    // Restore cmos index register (and thus prior nmi disable state)
    u8 cmosindex = GET_LOW(Call16Data.cmosindex);
    if (!(cmosindex & NMI_DISABLE_BIT)) {
        outb(cmosindex, PORT_CMOS_INDEX);
        inb(PORT_CMOS_DATA);
    }
    return method;
}
115 
// Force next call16() to restore to a pristine cpu environment state.
// 'big' selects "big real" mode (C16_BIG) with the a20 gate enabled.
static void
call16_override(int big)
{
    ASSERT32FLAT();
    if (getesp() > BUILD_STACK_ADDR)
        panic("call16_override with invalid stack\n");
    // Discard any previously backed up state.
    memset(&Call16Data, 0, sizeof(Call16Data));
    if (big) {
        Call16Data.method = C16_BIG;
        Call16Data.a20 = 1;
    } else {
        Call16Data.a20 = !CONFIG_DISABLE_A20;
    }
}
131 
// 16bit handler code called from call16() / call16_smm().  Restores
// the backed up 16bit environment, runs 'func', then re-records the
// state so the caller can transition back to 32bit mode.
u32 VISIBLE16
call16_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx))
{
    u8 method = call32_post();
    u32 ret = func(eax, edx);
    call32_prep(method);
    return ret;
}
141 
// Helper macros for emitting 16bit instructions from 32bit inline asm
// (and vice-versa) during the mode transitions below.
#define ASM32_SWITCH16 "  .pushsection .text.32fseg." UNIQSEC "\n  .code16\n"
#define ASM32_BACK32   "  .popsection\n  .code32\n"
#define ASM16_SWITCH32 "  .code32\n"
#define ASM16_BACK16   "  .code16gcc\n"
146 
// Call a SeaBIOS C function in 32bit mode using smm trampoline.
// 'eax' is passed to 'func' and the function's return value is
// returned.  Must be called from 16bit mode.
static u32
call32_smm(void *func, u32 eax)
{
    ASSERT16();
    dprintf(9, "call32_smm %p %x\n", func, eax);
    call32_prep(C16_SMM);
    u32 bkup_esp;
    asm volatile(
        // Backup esp / set esp to flat stack location
        "  movl %%esp, %0\n"
        "  movl %%ss, %%eax\n"
        "  shll $4, %%eax\n"
        "  addl %%eax, %%esp\n"

        // Transition to 32bit mode, call func, return to 16bit
        "  movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
        "  movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
        "  movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%ebx\n"
        "  outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
        "  rep; nop\n"
        "  hlt\n"

        ASM16_SWITCH32
        "1:movl %1, %%eax\n"
        "  calll *%2\n"
        "  movl %%eax, %1\n"

        "  movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
        "  movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
        "  movl $2f, %%ebx\n"
        "  outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
        "  rep; nop\n"
        "  hlt\n"

        // Restore esp
        ASM16_BACK16
        "2:movl %0, %%esp\n"
        : "=&r" (bkup_esp), "+r" (eax)
        : "r" (func)
        : "eax", "ecx", "edx", "ebx", "cc", "memory");
    call32_post();

    dprintf(9, "call32_smm done %p %x\n", func, eax);
    return eax;
}
193 
// Call a 16bit SeaBIOS function using the smm trampoline.  'eax' and
// 'edx' are forwarded to 'func' via call16_helper(); returns the
// resulting eax value.  Must be called from 32bit "flat" mode.
static u32
call16_smm(u32 eax, u32 edx, void *func)
{
    ASSERT32FLAT();
    if (!CONFIG_CALL32_SMM)
        return eax;
    // Convert flat pointer to an offset relative to the f-segment.
    func -= BUILD_BIOS_ADDR;
    dprintf(9, "call16_smm %p %x %x\n", func, eax, edx);
    u32 stackoffset = Call16Data.ss << 4;
    asm volatile(
        // Restore esp
        "  subl %0, %%esp\n"

        // Transition to 16bit mode, call func, return to 32bit
        "  movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
        "  movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
        "  movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%ebx\n"
        "  outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
        "  rep; nop\n"
        "  hlt\n"

        ASM32_SWITCH16
        "1:movl %1, %%eax\n"
        "  movl %3, %%ecx\n"
        "  calll _cfunc16_call16_helper\n"
        "  movl %%eax, %1\n"

        "  movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
        "  movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
        "  movl $2f, %%ebx\n"
        "  outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
        "  rep; nop\n"
        "  hlt\n"

        // Set esp to flat stack location
        ASM32_BACK32
        "2:addl %0, %%esp\n"
        : "+r" (stackoffset), "+r" (eax), "+d" (edx)
        : "r" (func)
        : "eax", "ecx", "ebx", "cc", "memory");
    return eax;
}
236 
// Call a 32bit SeaBIOS function from a 16bit SeaBIOS function.
// Returns the function's result, or 'errret' if the transition to
// 32bit mode is not possible.
u32 VISIBLE16
__call32(void *func, u32 eax, u32 errret)
{
    ASSERT16();
    if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32))
        return call32_smm(func, eax);
    // Jump directly to 32bit mode - this clobbers the 16bit segment
    // selector registers.
    int ret = call32_prep(C16_BIG);
    if (ret)
        return errret;
    u32 bkup_ss, bkup_esp;
    asm volatile(
        // Backup ss/esp / set esp to flat stack location
        "  movl %%ss, %0\n"
        "  movl %%esp, %1\n"
        "  shll $4, %0\n"
        "  addl %0, %%esp\n"
        "  shrl $4, %0\n"

        // Transition to 32bit mode, call func, return to 16bit
        "  movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n"
        "  jmp transition32_nmi_off\n"
        ASM16_SWITCH32
        "1:calll *%3\n"
        "  movl $2f, %%edx\n"
        "  jmp transition16big\n"

        // Restore ds/ss/esp
        ASM16_BACK16
        "2:movl %0, %%ds\n"
        "  movl %0, %%ss\n"
        "  movl %1, %%esp\n"
        : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax)
        : "r" (func)
        : "ecx", "edx", "cc", "memory");
    call32_post();
    return eax;
}
277 
// Call a 16bit SeaBIOS function, restoring the mode from last call32().
// 'eax' and 'edx' are forwarded to 'func' via call16_helper(); returns
// the resulting eax.  Must run on the main (low) stack.
static u32
call16(u32 eax, u32 edx, void *func)
{
    ASSERT32FLAT();
    if (getesp() > MAIN_STACK_MAX)
        panic("call16 with invalid stack\n");
    if (CONFIG_CALL32_SMM && Call16Data.method == C16_SMM)
        return call16_smm(eax, edx, func);

    // Select the 16bit transition thunk ("big real" mode if the prior
    // call32 used C16_BIG, or while still in POST).
    extern void transition16big(void);
    extern void transition16(void);
    void *thunk = transition16;
    if (Call16Data.method == C16_BIG || in_post())
        thunk = transition16big;
    // Convert flat pointer to an offset relative to the f-segment.
    func -= BUILD_BIOS_ADDR;
    u32 stackseg = Call16Data.ss;
    asm volatile(
        // Transition to 16bit mode
        "  movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n"
        "  jmp *%%ecx\n"
        // Setup ss/esp and call func
        ASM32_SWITCH16
        "1:movl %2, %%ecx\n"
        "  shll $4, %2\n"
        "  movw %%cx, %%ss\n"
        "  subl %2, %%esp\n"
        "  movw %%cx, %%ds\n"
        "  movl %4, %%edx\n"
        "  movl %3, %%ecx\n"
        "  calll _cfunc16_call16_helper\n"
        // Return to 32bit and restore esp
        "  movl $2f, %%edx\n"
        "  jmp transition32_nmi_off\n"
        ASM32_BACK32
        "2:addl %2, %%esp\n"
        : "+a" (eax), "+c"(thunk), "+r"(stackseg)
        : "r" (func), "r" (edx)
        : "edx", "cc", "memory");
    return eax;
}
319 
320 
321 /****************************************************************
322  * Extra 16bit stack
323  ****************************************************************/
324 
// Space for a stack for 16bit code.
u8 ExtraStack[BUILD_EXTRA_STACK_SIZE+1] VARLOW __aligned(8);
// Current top of the extra stack (the stack grows downward from here).
u8 *StackPos VARLOW;
328 
329 // Test if currently on the extra stack
330 int
on_extra_stack(void)331 on_extra_stack(void)
332 {
333     return MODE16 && GET_SEG(SS) == SEG_LOW && getesp() > (u32)ExtraStack;
334 }
335 
// Switch to the extra stack and call a function.  The caller's %ss and
// %esp are pushed at the top of the extra stack (__stack_hop_back()
// relies on this layout to return to the original stack).
u32
__stack_hop(u32 eax, u32 edx, void *func)
{
    if (on_extra_stack())
        // Already on the extra stack - call directly.
        return ((u32 (*)(u32, u32))func)(eax, edx);
    ASSERT16();
    u16 stack_seg = SEG_LOW;
    u32 bkup_ss, bkup_esp;
    asm volatile(
        // Backup current %ss/%esp values.
        "movw %%ss, %w3\n"
        "movl %%esp, %4\n"
        // Copy stack seg to %ds/%ss and set %esp
        "movw %w6, %%ds\n"
        "movw %w6, %%ss\n"
        "movl %5, %%esp\n"
        "pushl %3\n"
        "pushl %4\n"
        // Call func
        "calll *%2\n"
        "popl %4\n"
        "popl %3\n"
        // Restore segments and stack
        "movw %w3, %%ds\n"
        "movw %w3, %%ss\n"
        "movl %4, %%esp"
        : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss), "=&r" (bkup_esp)
        : "m" (StackPos), "r" (stack_seg)
        : "cc", "memory");
    return eax;
}
368 
// Switch back to original caller's stack and call a function.  Reads
// the caller's saved %ss/%esp from just below StackPos (where
// __stack_hop() pushed them).
u32
__stack_hop_back(u32 eax, u32 edx, void *func)
{
    if (!MODESEGMENT)
        // In 32bit flat mode - use call16() to reach 16bit code.
        return call16(eax, edx, func);
    if (!MODE16 || !on_extra_stack())
        // Not on the extra stack - call directly.
        return ((u32 (*)(u32, u32))func)(eax, edx);
    ASSERT16();
    u16 bkup_ss;
    u32 bkup_stack_pos, temp;
    asm volatile(
        // Backup stack_pos and current %ss/%esp
        "movl %6, %4\n"
        "movw %%ss, %w3\n"
        "movl %%esp, %6\n"
        // Restore original callers' %ss/%esp
        "movl -4(%4), %5\n"
        "movl %5, %%ss\n"
        "movw %%ds:-8(%4), %%sp\n"
        "movl %5, %%ds\n"
        // Call func
        "calll *%2\n"
        // Restore %ss/%esp and stack_pos
        "movw %w3, %%ds\n"
        "movw %w3, %%ss\n"
        "movl %6, %%esp\n"
        "movl %4, %6"
        : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss)
          , "=&r" (bkup_stack_pos), "=&r" (temp), "+m" (StackPos)
        :
        : "cc", "memory");
    return eax;
}
403 
404 
405 /****************************************************************
406  * External 16bit interface calling
407  ****************************************************************/
408 
// Far call 16bit code with a specified register state.  'callregs' is
// the register state (at offset 'callregs' within segment 'callregseg');
// the called code's resulting registers are written back to it.
void VISIBLE16
_farcall16(struct bregs *callregs, u16 callregseg)
{
    if (need_hop_back()) {
        // Run on the original stack (not the extra stack).
        stack_hop_back(_farcall16, callregs, callregseg);
        return;
    }
    ASSERT16();
    asm volatile(
        "calll __farcall16\n"
        : "+a" (callregs), "+m" (*callregs), "+d" (callregseg)
        :
        : "ebx", "ecx", "esi", "edi", "cc", "memory");
}
424 
// Invoke external 16bit code (forcing a pristine "real mode" cpu state).
void
farcall16(struct bregs *callregs)
{
    call16_override(0);
    _farcall16(callregs, 0);
}
432 
// Invoke external 16bit code in "big real" mode (a20 enabled).
void
farcall16big(struct bregs *callregs)
{
    call16_override(1);
    _farcall16(callregs, 0);
}
440 
// Invoke a 16bit software interrupt.  'offset' is the code offset the
// interrupt handler returns to.
void
__call16_int(struct bregs *callregs, u16 offset)
{
    callregs->code.offset = offset;
    if (!MODESEGMENT) {
        // In 32bit flat mode - convert the flat 'callregs' pointer to
        // an offset relative to the backed up stack segment.
        callregs->code.seg = SEG_BIOS;
        _farcall16((void*)callregs - Call16Data.ss * 16, Call16Data.ss);
        return;
    }
    callregs->code.seg = GET_SEG(CS);
    _farcall16(callregs, GET_SEG(SS));
}
454 
// Reset the machine by jumping to the cpu reset vector (never returns).
void
reset(void)
{
    extern void reset_vector(void) __noreturn;
    if (!MODE16)
        // Transition to 16bit mode before invoking the reset vector.
        call16(0, 0, reset_vector);
    reset_vector();
}
464 
465 
466 /****************************************************************
467  * Threads
468  ****************************************************************/
469 
// Thread info - stored at bottom of each thread stack - don't change
// without also updating the inline assembler below.
struct thread_info {
    void *stackpos;         // Saved %esp while the thread is not running
    struct hlist_node node; // Link in the list of runnable threads
};
// The main thread (runs on the regular stack; not heap allocated).
struct thread_info MainThread VARFSEG = {
    NULL, { &MainThread.node, &MainThread.node.next }
};
// Size (and alignment) of each heap allocated thread stack.
#define THREADSTACKSIZE 4096
480 
// Check if any threads (other than the main thread) are running.
static int
have_threads(void)
{
    return (CONFIG_THREADS
            && GET_FLATPTR(MainThread.node.next) != &MainThread.node);
}
488 
489 // Return the 'struct thread_info' for the currently running thread.
490 struct thread_info *
getCurThread(void)491 getCurThread(void)
492 {
493     u32 esp = getesp();
494     if (esp <= MAIN_STACK_MAX)
495         return &MainThread;
496     return (void*)ALIGN_DOWN(esp, THREADSTACKSIZE);
497 }
498 
// CanInterrupt: set once it is safe to enable hardware irqs (see
// check_irqs).  ThreadControl: "etc/threads" setting - non-zero enables
// threads; 2 also permits threads during optionrom execution.
static u8 CanInterrupt, ThreadControl;
500 
// Initialize the support for internal threads.
void
thread_setup(void)
{
    // Interrupts may now be enabled (see check_irqs).
    CanInterrupt = 1;
    call16_override(1);
    if (! CONFIG_THREADS)
        return;
    ThreadControl = romfile_loadint("etc/threads", 1);
}
511 
512 // Should hardware initialization threads run during optionrom execution.
513 int
threads_during_optionroms(void)514 threads_during_optionroms(void)
515 {
516     return CONFIG_THREADS && CONFIG_RTC_TIMER && ThreadControl == 2 && in_post();
517 }
518 
// Switch to next thread stack.  Saves the current thread's %esp into
// its thread_info and resumes the next thread in the run list.
static void
switch_next(struct thread_info *cur)
{
    struct thread_info *next = container_of(
        cur->node.next, struct thread_info, node);
    if (cur == next)
        // Nothing to do.
        return;
    asm volatile(
        "  pushl $1f\n"                 // store return pc
        "  pushl %%ebp\n"               // backup %ebp
        "  movl %%esp, (%%eax)\n"       // cur->stackpos = %esp
        "  movl (%%ecx), %%esp\n"       // %esp = next->stackpos
        "  popl %%ebp\n"                // restore %ebp
        "  retl\n"                      // restore pc
        "1:\n"
        : "+a"(cur), "+c"(next)
        :
        : "ebx", "edx", "esi", "edi", "cc", "memory");
}
540 
// Last thing called from a thread (called on MainThread stack).
// Unlinks the thread from the run list and frees its stack.
static void
__end_thread(struct thread_info *old)
{
    hlist_del(&old->node);
    dprintf(DEBUG_thread, "\\%08x/ End thread\n", (u32)old);
    free(old);
    if (!have_threads())
        dprintf(1, "All threads complete.\n");
}
551 
// Create a new thread and start executing 'func' in it.  The new
// thread runs immediately (the caller is suspended until the thread
// yields or completes).  If threads are disabled or the stack
// allocation fails, 'func' is simply run synchronously.
void
run_thread(void (*func)(void*), void *data)
{
    ASSERT32FLAT();
    if (! CONFIG_THREADS || ! ThreadControl)
        goto fail;
    struct thread_info *thread;
    // Stack is aligned to its size so getCurThread() can locate it.
    thread = memalign_tmphigh(THREADSTACKSIZE, THREADSTACKSIZE);
    if (!thread)
        goto fail;

    dprintf(DEBUG_thread, "/%08x\\ Start thread\n", (u32)thread);
    thread->stackpos = (void*)thread + THREADSTACKSIZE;
    struct thread_info *cur = getCurThread();
    hlist_add_after(&thread->node, &cur->node);
    asm volatile(
        // Start thread
        "  pushl $1f\n"                 // store return pc
        "  pushl %%ebp\n"               // backup %ebp
        "  movl %%esp, (%%edx)\n"       // cur->stackpos = %esp
        "  movl (%%ebx), %%esp\n"       // %esp = thread->stackpos
        "  calll *%%ecx\n"              // Call func

        // End thread
        "  movl %%ebx, %%eax\n"         // %eax = thread
        "  movl 4(%%ebx), %%ebx\n"      // %ebx = thread->node.next
        "  movl (%5), %%esp\n"          // %esp = MainThread.stackpos
        "  calll %4\n"                  // call __end_thread(thread)
        "  movl -4(%%ebx), %%esp\n"     // %esp = next->stackpos
        "  popl %%ebp\n"                // restore %ebp
        "  retl\n"                      // restore pc
        "1:\n"
        : "+a"(data), "+c"(func), "+b"(thread), "+d"(cur)
        : "m"(*(u8*)__end_thread), "m"(MainThread)
        : "esi", "edi", "cc", "memory");
    return;

fail:
    func(data);
}
593 
594 
595 /****************************************************************
596  * Thread helpers
597  ****************************************************************/
598 
// Low-level irq enable - briefly allow pending hardware irqs to run.
void VISIBLE16
check_irqs(void)
{
    if (!MODESEGMENT && !CanInterrupt) {
        // Can't enable interrupts (PIC and/or IVT not yet setup)
        cpu_relax();
        return;
    }
    if (need_hop_back()) {
        // Hop off the extra stack before enabling irqs.
        stack_hop_back(check_irqs, 0, 0);
        return;
    }
    if (MODE16)
        clock_poll_irq();
    // sti takes effect after the following instruction - the nops give
    // pending irqs a window to fire before interrupts are re-disabled.
    asm volatile("sti ; nop ; rep ; nop ; cli ; cld" : : :"memory");
}
616 
// Briefly permit irqs to occur and give other threads a chance to run.
void
yield(void)
{
    if (MODESEGMENT || !CONFIG_THREADS) {
        check_irqs();
        return;
    }
    struct thread_info *cur = getCurThread();
    if (cur == &MainThread)
        // Permit irqs to fire
        check_irqs();

    // Switch to the next thread
    switch_next(cur);
}
633 
// Halt the cpu until the next hardware irq fires.
void VISIBLE16
wait_irq(void)
{
    if (need_hop_back()) {
        // Hop off the extra stack before halting.
        stack_hop_back(wait_irq, 0, 0);
        return;
    }
    asm volatile("sti ; hlt ; cli ; cld": : :"memory");
}
643 
// Wait for next irq to occur.
void
yield_toirq(void)
{
    if (!CONFIG_HARDWARE_IRQ
        || (!MODESEGMENT && (have_threads() || !CanInterrupt))) {
        // Threads still active or irqs not available - do a yield instead.
        yield();
        return;
    }
    wait_irq();
}
656 
// Wait for all threads (other than the main thread) to complete.
void
wait_threads(void)
{
    ASSERT32FLAT();
    while (have_threads())
        yield();
}
665 
666 void
mutex_lock(struct mutex_s * mutex)667 mutex_lock(struct mutex_s *mutex)
668 {
669     ASSERT32FLAT();
670     if (! CONFIG_THREADS)
671         return;
672     while (mutex->isLocked)
673         yield();
674     mutex->isLocked = 1;
675 }
676 
677 void
mutex_unlock(struct mutex_s * mutex)678 mutex_unlock(struct mutex_s *mutex)
679 {
680     ASSERT32FLAT();
681     if (! CONFIG_THREADS)
682         return;
683     mutex->isLocked = 0;
684 }
685 
686 
687 /****************************************************************
688  * Thread preemption
689  ****************************************************************/
690 
// Set while optionrom preemption is active (see start_preempt).
int CanPreempt VARFSEG;
// Count of preemption checks performed (reported for debugging).
static u32 PreemptCount;
693 
// Turn on RTC irqs and arrange for them to check the 32bit threads.
void
start_preempt(void)
{
    if (! threads_during_optionroms())
        return;
    CanPreempt = 1;
    PreemptCount = 0;
    rtc_use();
}
704 
// Turn off RTC irqs / stop checking for thread execution.
void
finish_preempt(void)
{
    if (! threads_during_optionroms()) {
        // Preemption wasn't active - still give threads a chance to run.
        yield();
        return;
    }
    CanPreempt = 0;
    rtc_release();
    dprintf(9, "Done preempt - %d checks\n", PreemptCount);
    yield();
}
718 
719 // Check if preemption is on, and wait for it to complete if so.
720 int
wait_preempt(void)721 wait_preempt(void)
722 {
723     if (MODESEGMENT || !CONFIG_THREADS || !CanPreempt
724         || getesp() < MAIN_STACK_MAX)
725         return 0;
726     while (CanPreempt)
727         yield();
728     return 1;
729 }
730 
// Try to execute 32bit threads (invoked via check_preempt()).
void VISIBLE32INIT
yield_preempt(void)
{
    PreemptCount++;
    switch_next(&MainThread);
}
738 
// 16bit code that checks if threads are pending and executes them if so.
void
check_preempt(void)
{
    if (CONFIG_THREADS && GET_GLOBAL(CanPreempt) && have_threads())
        call32(yield_preempt, 0, 0);
}
746 
747 
748 /****************************************************************
749  * call32 helper
750  ****************************************************************/
751 
// Parameter block used to forward multiple arguments through call32()
// (which only carries a single u32).
struct call32_params_s {
    void *func;         // Function to invoke in 32bit mode
    u32 eax, edx, ecx;  // Arguments passed to func
};
756 
757 u32 VISIBLE32FLAT
call32_params_helper(struct call32_params_s * params)758 call32_params_helper(struct call32_params_s *params)
759 {
760     return ((u32 (*)(u32, u32, u32))params->func)(
761         params->eax, params->edx, params->ecx);
762 }
763 
// Call a 32bit function with three parameters from 16bit mode.
// Returns 'errret' if the 32bit transition is not possible.
u32
__call32_params(void *func, u32 eax, u32 edx, u32 ecx, u32 errret)
{
    ASSERT16();
    // Parameters are passed via a stack-local block (flat pointer).
    struct call32_params_s params = {func, eax, edx, ecx};
    return call32(call32_params_helper, MAKE_FLATPTR(GET_SEG(SS), &params)
                  , errret);
}
772