1 /* -----------------------------------------------------------------------------
2  *
3  * (c) The GHC Team 1998-2005
4  *
5  * Prototypes for functions in Schedule.c
6  * (RTS internal scheduler interface)
7  *
8  * -------------------------------------------------------------------------*/
9 
10 #pragma once
11 
12 #include "rts/OSThreads.h"
13 #include "Capability.h"
14 #include "Trace.h"
15 
16 #include "BeginPrivate.h"
17 
18 /* initScheduler(), exitScheduler()
19  * Called from STG :  no
20  * Locks assumed   :  none
21  */
22 void initScheduler (void);
23 void exitScheduler (bool wait_foreign);
24 void freeScheduler (void);
25 void markScheduler (evac_fn evac, void *user);
26 
27 // Place a new thread on the run queue of the current Capability
28 void scheduleThread (Capability *cap, StgTSO *tso);
29 
30 // Place a new thread on the run queue of a specified Capability
31 // (cap is the currently owned Capability, cpu is the number of
32 // the desired Capability).
33 void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
34 
35 /* wakeUpRts()
36  *
37  * Causes an OS thread to wake up and run the scheduler, if necessary.
38  */
39 #if defined(THREADED_RTS)
40 void wakeUpRts(void);
41 #endif
42 
43 /* raiseExceptionHelper */
44 StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);
45 
46 /* findRetryFrameHelper */
47 StgWord findRetryFrameHelper (Capability *cap, StgTSO *tso);
48 
49 /* findAtomicallyFrameHelper */
50 StgWord findAtomicallyFrameHelper (Capability *cap, StgTSO *tso);
51 
52 /* Entry point for a new worker */
53 void scheduleWorker (Capability *cap, Task *task);
54 
55 #if defined(THREADED_RTS)
56 void stopAllCapabilitiesWith (Capability **pCap, Task *task, SyncType sync_type);
57 void stopAllCapabilities (Capability **pCap, Task *task);
58 void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task);
59 #endif
60 
61 /* The state of the scheduler.  This is used to control the sequence
62  * of events during shutdown.  See Note [shutdown] in Schedule.c.
63  */
64 #define SCHED_RUNNING       0  /* running as normal */
65 #define SCHED_INTERRUPTING  1  /* before threads are deleted */
66 #define SCHED_SHUTTING_DOWN 2  /* final shutdown */
67 
68 extern volatile StgWord sched_state;
69 
70 /*
71  * flag that tracks whether we have done any execution in this time
72  * slice, and controls the disabling of the interval timer.
73  *
74  * The timer interrupt transitions ACTIVITY_YES into
75  * ACTIVITY_MAYBE_NO, waits for RtsFlags.GcFlags.idleGCDelayTime,
76  * and then:
77  *   - if idle GC is on, set ACTIVITY_INACTIVE and wakeUpRts()
78  *   - if idle GC is off, set ACTIVITY_DONE_GC and stopTimer()
79  *
80  * If the scheduler finds ACTIVITY_INACTIVE, then it sets
81  * ACTIVITY_DONE_GC, performs the GC and calls stopTimer().
82  *
83  * If the scheduler finds ACTIVITY_DONE_GC and it has a thread to run,
84  * it enables the timer again with startTimer().
85  */
86 #define ACTIVITY_YES      0
87   // the RTS is active
88 #define ACTIVITY_MAYBE_NO 1
89   // no activity since the last timer signal
90 #define ACTIVITY_INACTIVE 2
91   // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
92 #define ACTIVITY_DONE_GC  3
93   // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
94   // enabled) and the interval timer is now turned off.
95 
/* Recent activity flag.
 * Locks required  : The transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  The transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock is required
 * to set it to ACTIVITY_YES.
 */
102 extern volatile StgWord recent_activity;
103 
104 /* Thread queues.
105  * Locks required  : sched_mutex
106  */
107 #if !defined(THREADED_RTS)
108 extern  StgTSO *blocked_queue_hd, *blocked_queue_tl;
109 extern  StgTSO *sleeping_queue;
110 #endif
111 
112 extern bool heap_overflow;
113 
114 #if defined(THREADED_RTS)
115 extern Mutex sched_mutex;
116 #endif
117 
118 /* Called by shutdown_handler(). */
119 void interruptStgRts (void);
120 
121 void resurrectThreads (StgTSO *);
122 
123 /* -----------------------------------------------------------------------------
124  * Some convenient macros/inline functions...
125  */
126 
127 #if !IN_STG_CODE
128 
129 /* END_TSO_QUEUE and friends now defined in includes/stg/MiscClosures.h */
130 
131 /* Add a thread to the end of the run queue.
132  * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
133  * ASSUMES: cap->running_task is the current task.
134  */
135 EXTERN_INLINE void
136 appendToRunQueue (Capability *cap, StgTSO *tso);
137 
138 EXTERN_INLINE void
appendToRunQueue(Capability * cap,StgTSO * tso)139 appendToRunQueue (Capability *cap, StgTSO *tso)
140 {
141     ASSERT(tso->_link == END_TSO_QUEUE);
142     if (cap->run_queue_hd == END_TSO_QUEUE) {
143         cap->run_queue_hd = tso;
144         tso->block_info.prev = END_TSO_QUEUE;
145     } else {
146         setTSOLink(cap, cap->run_queue_tl, tso);
147         setTSOPrev(cap, tso, cap->run_queue_tl);
148     }
149     cap->run_queue_tl = tso;
150     cap->n_run_queue++;
151 }
152 
153 /* Push a thread on the beginning of the run queue.
154  * ASSUMES: cap->running_task is the current task.
155  */
156 EXTERN_INLINE void
157 pushOnRunQueue (Capability *cap, StgTSO *tso);
158 
159 EXTERN_INLINE void
pushOnRunQueue(Capability * cap,StgTSO * tso)160 pushOnRunQueue (Capability *cap, StgTSO *tso)
161 {
162     setTSOLink(cap, tso, cap->run_queue_hd);
163     tso->block_info.prev = END_TSO_QUEUE;
164     if (cap->run_queue_hd != END_TSO_QUEUE) {
165         setTSOPrev(cap, cap->run_queue_hd, tso);
166     }
167     cap->run_queue_hd = tso;
168     if (cap->run_queue_tl == END_TSO_QUEUE) {
169         cap->run_queue_tl = tso;
170     }
171     cap->n_run_queue++;
172 }
173 
/* Pop the first thread off the runnable queue and return it.
 * The popped TSO is fully unlinked (both _link and block_info.prev
 * are reset) before being handed back.
 * ASSUMES: the queue is non-empty and the caller owns the capability.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    ASSERT(cap->n_run_queue != 0);
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;

    // Clear the successor's back-pointer, since t is leaving the queue.
    StgTSO *link = RELAXED_LOAD(&t->_link);
    if (link != END_TSO_QUEUE) {
        link->block_info.prev = END_TSO_QUEUE;
    }
    RELAXED_STORE(&t->_link, END_TSO_QUEUE); // no write barrier req'd

    // If the queue just became empty, reset the tail as well.
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    cap->n_run_queue--;
    return t;
}
196 
197 INLINE_HEADER StgTSO *
peekRunQueue(Capability * cap)198 peekRunQueue (Capability *cap)
199 {
200     return cap->run_queue_hd;
201 }
202 
203 void promoteInRunQueue (Capability *cap, StgTSO *tso);
204 
205 /* Add a thread to the end of the blocked queue.
206  */
207 #if !defined(THREADED_RTS)
208 INLINE_HEADER void
appendToBlockedQueue(StgTSO * tso)209 appendToBlockedQueue(StgTSO *tso)
210 {
211     ASSERT(tso->_link == END_TSO_QUEUE);
212     if (blocked_queue_hd == END_TSO_QUEUE) {
213         blocked_queue_hd = tso;
214     } else {
215         setTSOLink(&MainCapability, blocked_queue_tl, tso);
216     }
217     blocked_queue_tl = tso;
218 }
219 #endif
220 
221 /* Check whether various thread queues are empty
222  */
223 INLINE_HEADER bool
emptyQueue(StgTSO * q)224 emptyQueue (StgTSO *q)
225 {
226     return (q == END_TSO_QUEUE);
227 }
228 
229 INLINE_HEADER bool
emptyRunQueue(Capability * cap)230 emptyRunQueue(Capability *cap)
231 {
232     // Can only be called by the task owning the capability.
233     TSAN_ANNOTATE_BENIGN_RACE(&cap->n_run_queue, "emptyRunQueue");
234     return cap->n_run_queue == 0;
235 }
236 
237 INLINE_HEADER void
truncateRunQueue(Capability * cap)238 truncateRunQueue(Capability *cap)
239 {
240     // Can only be called by the task owning the capability.
241     TSAN_ANNOTATE_BENIGN_RACE(&cap->run_queue_hd, "truncateRunQueue");
242     TSAN_ANNOTATE_BENIGN_RACE(&cap->run_queue_tl, "truncateRunQueue");
243     TSAN_ANNOTATE_BENIGN_RACE(&cap->n_run_queue, "truncateRunQueue");
244     cap->run_queue_hd = END_TSO_QUEUE;
245     cap->run_queue_tl = END_TSO_QUEUE;
246     cap->n_run_queue = 0;
247 }
248 
249 #if !defined(THREADED_RTS)
250 #define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
251 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
252 #endif
253 
254 INLINE_HEADER bool
emptyThreadQueues(Capability * cap)255 emptyThreadQueues(Capability *cap)
256 {
257     return emptyRunQueue(cap)
258 #if !defined(THREADED_RTS)
259         && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
260 #endif
261     ;
262 }
263 
264 #endif /* !IN_STG_CODE */
265 
266 #include "EndPrivate.h"
267