// -*- c-basic-offset: 4; related-file-name: "../../lib/routerthread.cc" -*-
#ifndef CLICK_ROUTERTHREAD_HH
#define CLICK_ROUTERTHREAD_HH
#include <click/sync.hh>
#include <click/vector.hh>
#include <click/timerset.hh>
#if CLICK_LINUXMODULE
# include <click/cxxprotect.h>
CLICK_CXX_PROTECT
# include <linux/sched.h>
CLICK_CXX_UNPROTECT
# include <click/cxxunprotect.h>
#elif CLICK_BSDMODULE
# include <click/cxxprotect.h>
CLICK_CXX_PROTECT
# include <sys/systm.h>
CLICK_CXX_UNPROTECT
# include <click/cxxunprotect.h>
#elif CLICK_USERLEVEL
# include <click/selectset.hh>
#endif

// NB: user must #include <click/task.hh> before <click/routerthread.hh>.
// We cannot #include <click/task.hh> ourselves because of a circular
// #include dependency.
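//
// For example, a source file that uses RouterThread would include, in this
// order (hypothetical translation unit, shown only to illustrate the rule):
//     #include <click/task.hh>
//     #include <click/routerthread.hh>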
CLICK_DECLS

class RouterThread { public:

    enum { THREAD_QUIESCENT = -1, THREAD_UNKNOWN = -1000 };

    inline int thread_id() const;

    inline Master *master() const;
    inline TimerSet &timer_set()                { return _timers; }
    inline const TimerSet &timer_set() const    { return _timers; }
#if CLICK_USERLEVEL
    inline SelectSet &select_set()              { return _selects; }
    inline const SelectSet &select_set() const  { return _selects; }
#endif

    // Task list functions
    inline bool active() const;
    inline Task *task_begin() const;
    inline Task *task_next(Task *task) const;
    inline Task *task_end() const;
    void scheduled_tasks(Router *router, Vector<Task *> &x);

    inline void lock_tasks();
    inline void unlock_tasks();

    inline void schedule_block_tasks();
    inline void block_tasks(bool scheduled);
    inline void unblock_tasks();

    inline bool stop_flag() const;

    inline void mark_driver_entry();
    void driver();

    void kill_router(Router *router);

#if HAVE_ADAPTIVE_SCHEDULER
    // min_cpu_share() and max_cpu_share() are expressed on a scale with
    // Task::MAX_UTILIZATION == 100%.
    unsigned min_cpu_share() const      { return _min_click_share; }
    unsigned max_cpu_share() const      { return _max_click_share; }
    unsigned cur_cpu_share() const      { return _cur_click_share; }
    void set_cpu_share(unsigned min_share, unsigned max_share);
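    //
    // A minimal usage sketch (hypothetical values): restrict Click to
    // between 10% and 50% of the CPU, using the Task::MAX_UTILIZATION
    // scale described above:
    //     thread->set_cpu_share(Task::MAX_UTILIZATION / 10,
    //                           Task::MAX_UTILIZATION / 2);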
#endif

#if CLICK_LINUXMODULE || CLICK_BSDMODULE
    bool greedy() const                 { return _greedy; }
    void set_greedy(bool g)             { _greedy = g; }
#endif

    inline void wake();

#if CLICK_USERLEVEL
    inline void run_signals();
#endif

    enum { S_PAUSED, S_BLOCKED, S_TIMERWAIT,
           S_LOCKSELECT, S_LOCKTASKS,
           S_RUNTASK, S_RUNTIMER, S_RUNSIGNAL, S_RUNPENDING, S_RUNSELECT,
           NSTATES };
    inline void set_thread_state(int state);
    inline void set_thread_state_for_blocking(int delay_type);
#if CLICK_DEBUG_SCHEDULING
    int thread_state() const            { return _thread_state; }
    static String thread_state_name(int state);
    uint32_t driver_epoch() const       { return _driver_epoch; }
    uint32_t driver_task_epoch() const  { return _driver_task_epoch; }
    Timestamp task_epoch_time(uint32_t epoch) const;
# if CLICK_LINUXMODULE
    struct task_struct *sleeper() const { return _linux_task; }
# endif
# if CLICK_DEBUG_SCHEDULING > 1
    inline Timestamp thread_state_time(int state) const;
    inline uint64_t thread_state_count(int state) const;
# endif
#endif

  private:

#if HAVE_TASK_HEAP
    struct task_heap_element {
        unsigned pass;
        Task *t;
        task_heap_element() {
        }
        task_heap_element(Task *t_)
            : pass(t_->_pass), t(t_) {
        }
    };
#endif

    // LOCAL STATE GROUP
    TaskLink _task_link;
    volatile int _stop_flag;
#if HAVE_TASK_HEAP
    Vector<task_heap_element> _task_heap;
#endif

    TimerSet _timers;
#if CLICK_USERLEVEL
    SelectSet _selects;
#endif

#if HAVE_ADAPTIVE_SCHEDULER
    enum { C_CLICK, C_KERNEL, NCLIENTS };
    struct Client {                     // top-level stride clients
        unsigned pass;
        unsigned stride;
        int tickets;
        Client() : pass(0), tickets(0)  { }
    };
    Client _clients[NCLIENTS];
    unsigned _global_pass;              // global pass
    unsigned _max_click_share;          // maximum allowed Click share of CPU
    unsigned _min_click_share;          // minimum allowed Click share of CPU
    unsigned _cur_click_share;          // current Click share
    Timestamp _adaptive_restride_timestamp;
    int _adaptive_restride_iter;
#endif

    // EXTERNAL STATE GROUP
    Spinlock _task_lock CLICK_ALIGNED(CLICK_CACHE_LINE_SIZE);
    atomic_uint32_t _task_blocker;
    atomic_uint32_t _task_blocker_waiting;

    Task::Pending _pending_head;
    Task::Pending *_pending_tail;
    SpinlockIRQ _pending_lock;

    // SHARED STATE GROUP
    Master *_master CLICK_ALIGNED(CLICK_CACHE_LINE_SIZE);
    int _id;
    bool _driver_entered;
#if HAVE_MULTITHREAD && !(CLICK_LINUXMODULE || CLICK_MINIOS)
    click_processor_t _running_processor;
#endif
#if CLICK_LINUXMODULE
    bool _greedy;
    struct task_struct *_linux_task;
#endif
#if CLICK_MINIOS
    struct thread *_minios_thread;
#endif
  public:
    unsigned _tasks_per_iter;
    unsigned _iters_per_os;
  private:

#if CLICK_NS
    Timestamp _ns_scheduled;
    Timestamp _ns_last_active;
    int _ns_active_iter;
    enum { ns_iters_per_time = 1000 };
#endif

#if CLICK_BSDMODULE
    // XXX FreeBSD
    u_int64_t _old_tsc; /* MARKO - temp. */
    void *_sleep_ident;
    int _oticks;
    bool _greedy;
#endif

#if CLICK_DEBUG_SCHEDULING
    int _thread_state;
    uint32_t _driver_epoch;
    uint32_t _driver_task_epoch;
    enum { TASK_EPOCH_BUFSIZ = 32 };
    uint32_t _task_epoch_first;
    Timestamp _task_epoch_time[TASK_EPOCH_BUFSIZ];
# if CLICK_DEBUG_SCHEDULING > 1
    Timestamp _thread_state_time[NSTATES];
    uint64_t _thread_state_count[NSTATES];
    Timestamp _thread_state_timestamp;
# endif
#endif

    // called by Master
    RouterThread(Master *master, int threadno);
    ~RouterThread();

    // task requests
    inline void add_pending();
#if HAVE_STRIDE_SCHED
    inline unsigned pass() const {
# if HAVE_TASK_HEAP
        return _task_heap.size() ? _task_heap.unchecked_at(0).pass : 0;
# else
        return _task_link._next->_pass;
# endif
    }
#endif

    // task running functions
    inline void driver_lock_tasks();
    inline void driver_unlock_tasks();
    inline void run_tasks(int ntasks);
    inline void process_pending();
    inline void run_os();
#if HAVE_ADAPTIVE_SCHEDULER
    void client_set_tickets(int client, int tickets);
    inline void client_update_pass(int client, const Timestamp &before);
#endif
#if HAVE_TASK_HEAP
    void task_reheapify_from(int pos, Task*);
#endif
    static inline bool running_in_interrupt();
    inline bool current_thread_is_running() const;
    inline bool current_thread_is_running_cleanup() const;
    void request_stop();
    inline void request_go();

    friend class Task;
    friend class Master;
#if CLICK_USERLEVEL
    friend class SelectSet;
#endif

};


/** @brief Returns this thread's ID.
 *
 * The result is >= 0 for true threads, and < 0 for threads that never run any
 * of their associated Tasks.
 */
inline int
RouterThread::thread_id() const
{
    return _id;
}

/** @brief Returns this thread's associated Master. */
inline Master*
RouterThread::master() const
{
    return _master;
}

/** @brief Returns whether any tasks are scheduled.
 *
 * Returns false iff no tasks are scheduled and no events are pending.  Since
 * not all events actually matter (for example, a Task might have been
 * scheduled and then subsequently unscheduled), active() may temporarily
 * return true even when no real events are outstanding.
 */
inline bool
RouterThread::active() const
{
    click_compiler_fence();
#if HAVE_TASK_HEAP
    return _task_heap.size() != 0 || _pending_head.x;
#else
    return _task_link._next != &_task_link || _pending_head.x;
#endif
}
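
/* A minimal usage sketch (hypothetical caller, not part of this header):
 * active() can return true spuriously, but it returns false only when there
 * is genuinely nothing to do, so a driver-style loop may safely block only
 * on a false result:
 *
 *     if (!thread->active())
 *         block_until_woken();     // hypothetical wait; wake() ends it
 */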

/** @brief Returns the beginning of the scheduled task list.
 *
 * Each RouterThread maintains a list of all currently-scheduled tasks.
 * Elements may traverse this list with the task_begin(), task_next(), and
 * task_end() functions, using iterator-like code such as:
 *
 * @code
 * thread->lock_tasks();
 * for (Task *t = thread->task_begin();
 *      t != thread->task_end();
 *      t = thread->task_next(t)) {
 *     // ... do something with t...
 * }
 * thread->unlock_tasks();
 * @endcode
 *
 * The thread's task lock must be held during the traversal, as shown above.
 *
 * The return value may not be a real task.  Test it against task_end() before
 * use.
 *
 * @sa task_next, task_end, lock_tasks, unlock_tasks
 */
inline Task *
RouterThread::task_begin() const
{
#if HAVE_TASK_HEAP
    return (_task_heap.size() ? _task_heap.unchecked_at(0).t : 0);
#else
    return static_cast<Task *>(_task_link._next);
#endif
}

/** @brief Returns the task following @a task in the scheduled task list.
 * @param task the current task
 *
 * The return value may not be a real task.  Test it against task_end() before
 * use.  However, the @a task argument must be a real task; do not attempt to
 * call task_next(task_end()).
 *
 * @sa task_begin for usage, task_end
 */
inline Task *
RouterThread::task_next(Task *task) const
{
#if HAVE_TASK_HEAP
    int p = task->_schedpos + 1;
    return (p < _task_heap.size() ? _task_heap.unchecked_at(p).t : 0);
#else
    return static_cast<Task *>(task->_next);
#endif
}

/** @brief Returns the end of the scheduled task list.
 *
 * The return value is not a real task.
 *
 * @sa task_begin for usage, task_next
 */
inline Task *
RouterThread::task_end() const
{
#if HAVE_TASK_HEAP
    return 0;
#else
    return static_cast<Task *>(const_cast<TaskLink *>(&_task_link));
#endif
}

inline bool
RouterThread::running_in_interrupt()
{
#if CLICK_LINUXMODULE
    return in_interrupt();
#else
    return false;
#endif
}

inline void
RouterThread::mark_driver_entry()
{
    _driver_entered = true;
}

inline bool
RouterThread::current_thread_is_running() const
{
#if CLICK_LINUXMODULE
    return current == _linux_task && !running_in_interrupt();
#elif CLICK_MINIOS
    return get_current() == _minios_thread;
#elif CLICK_USERLEVEL && HAVE_MULTITHREAD && HAVE___THREAD_STORAGE_CLASS
    return click_current_thread_id == (_id | 0x40000000);
#elif CLICK_USERLEVEL && HAVE_MULTITHREAD
    return click_current_processor() == _running_processor;
#else
    return true;
#endif
}

inline bool
RouterThread::current_thread_is_running_cleanup() const
{
    return current_thread_is_running() || (!_driver_entered && _id >= 0);
}

inline void
RouterThread::schedule_block_tasks()
{
    assert(!current_thread_is_running());
    ++_task_blocker_waiting;
}

inline void
RouterThread::block_tasks(bool scheduled)
{
    assert(!current_thread_is_running() && !running_in_interrupt());
    if (!scheduled)
        ++_task_blocker_waiting;
    while (1) {
        uint32_t blocker = _task_blocker.value();
        if ((int32_t) blocker >= 0
            && _task_blocker.compare_swap(blocker, blocker + 1) == blocker)
            break;
#if CLICK_LINUXMODULE
        // 3.Nov.2008: Must allow other threads a chance to run.  Otherwise,
        // soft lock is possible: the thread in block_tasks() waits for
        // RouterThread::_linux_task to complete a task set, but
        // RouterThread::_linux_task can't run until the thread in
        // block_tasks() relinquishes the CPU.
        //
        // We might be able to avoid schedule() in some cases, but don't
        // bother to try.
        schedule();
#endif
    }
    --_task_blocker_waiting;
}

inline void
RouterThread::unblock_tasks()
{
    assert((int32_t) _task_blocker.value() > 0);
    --_task_blocker;
}
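
/* A minimal usage sketch (assumed caller, not part of this header): a
 * foreign thread that must keep this RouterThread out of its task list
 * pairs block_tasks() with unblock_tasks(), exactly as lock_tasks() and
 * unlock_tasks() below do internally:
 *
 *     thread->block_tasks(false);   // wait until the thread stops running tasks
 *     // ... examine or modify shared task state ...
 *     thread->unblock_tasks();      // let the thread resume
 */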

inline void
RouterThread::lock_tasks()
{
    assert(!running_in_interrupt());
    if (unlikely(!current_thread_is_running())) {
        block_tasks(false);
        _task_lock.acquire();
    }
}

inline void
RouterThread::unlock_tasks()
{
    assert(!running_in_interrupt());
    if (unlikely(!current_thread_is_running())) {
        _task_lock.release();
        unblock_tasks();
    }
}

inline void
RouterThread::wake()
{
#if CLICK_LINUXMODULE
    struct task_struct *task = _linux_task;
    if (task)
        wake_up_process(task);
#elif CLICK_USERLEVEL
    // see also Master::add_select()
    if (!current_thread_is_running())
        _selects.wake_immediate();
#elif CLICK_BSDMODULE && !BSD_NETISRSCHED
    if (_sleep_ident)
        wakeup_one(&_sleep_ident);
#endif
}

inline void
RouterThread::add_pending()
{
    wake();
}

inline bool
RouterThread::stop_flag() const
{
    return _stop_flag;
}

inline void
RouterThread::request_go()
{
    _stop_flag = 0;
}

inline void
RouterThread::set_thread_state(int state)
{
    (void) state;
#if CLICK_DEBUG_SCHEDULING
    assert(state >= 0 && state < NSTATES);
# if CLICK_DEBUG_SCHEDULING > 1
    Timestamp now = Timestamp::now();
    if (_thread_state_timestamp)
        _thread_state_time[_thread_state] += now - _thread_state_timestamp;
    if (_thread_state != state)
        ++_thread_state_count[_thread_state];
    _thread_state_timestamp = now;
# endif
    _thread_state = state;
#endif
}

inline void
RouterThread::set_thread_state_for_blocking(int delay_type)
{
    if (delay_type < 0)
        set_thread_state(S_BLOCKED);
    else
        set_thread_state(delay_type ? S_TIMERWAIT : S_PAUSED);
}

#if CLICK_DEBUG_SCHEDULING > 1
inline Timestamp
RouterThread::thread_state_time(int state) const
{
    assert(state >= 0 && state < NSTATES);
    return _thread_state_time[state];
}

inline uint64_t
RouterThread::thread_state_count(int state) const
{
    assert(state >= 0 && state < NSTATES);
    return _thread_state_count[state];
}
#endif

CLICK_ENDDECLS
#endif