1 /** @file
2
3 A brief file description
4
5 @section license License
6
7 Licensed to the Apache Software Foundation (ASF) under one
8 or more contributor license agreements. See the NOTICE file
9 distributed with this work for additional information
10 regarding copyright ownership. The ASF licenses this file
11 to you under the Apache License, Version 2.0 (the
12 "License"); you may not use this file except in compliance
13 with the License. You may obtain a copy of the License at
14
15 http://www.apache.org/licenses/LICENSE-2.0
16
17 Unless required by applicable law or agreed to in writing, software
18 distributed under the License is distributed on an "AS IS" BASIS,
19 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 See the License for the specific language governing permissions and
21 limitations under the License.
22 */
23
24 #pragma once
25
26 #include <tscore/TSSystemState.h>
27
28 #include "tscore/ink_align.h"
29 #include "I_EventProcessor.h"
30
// Interval used by the event system's load-balancing logic.
// NOTE(review): units are not visible from this header — presumably seconds; confirm at use sites.
// constexpr (rather than plain const) makes the compile-time nature explicit for a header-defined constant.
constexpr int LOAD_BALANCE_INTERVAL = 1;
32
TS_INLINE off_t
EventProcessor::allocate(int size)
{
  // Reserve a 16-byte-aligned chunk of each EThread's private data area and
  // return its offset from the start of EThread, or -1 when the fixed
  // per-thread area (PER_THREAD_DATA bytes) cannot hold the request.
  //
  // `start` is the first 16-byte-aligned offset at/after thread_private;
  // `loss` is the alignment padding skipped to reach `start`. Both are
  // computed once (function-local statics) since the layout is fixed.
  static off_t start = INK_ALIGN(offsetof(EThread, thread_private), 16);
  static off_t loss = start - offsetof(EThread, thread_private);
  size = INK_ALIGN(size, 16); // 16 byte alignment

  // Lock-free bump allocation: snapshot the used count, check capacity
  // (including the `loss` padding), and retry the CAS until we atomically
  // claim [old, old + size) or discover the area is exhausted.
  int old;
  do {
    old = thread_data_used;
    if (old + loss + size > PER_THREAD_DATA) {
      return -1;
    }
  } while (!ink_atomic_cas(&thread_data_used, old, old + size));

  return (off_t)(old + start);
}
50
51 TS_INLINE EThread *
assign_thread(EventType etype)52 EventProcessor::assign_thread(EventType etype)
53 {
54 int next;
55 ThreadGroupDescriptor *tg = &thread_group[etype];
56
57 ink_assert(etype < MAX_EVENT_TYPES);
58 if (tg->_count > 1) {
59 next = ++tg->_next_round_robin % tg->_count;
60 } else {
61 next = 0;
62 }
63 return tg->_thread[next];
64 }
65
66 // If thread_holding is the correct type, return it.
67 //
68 // Otherwise check if there is already an affinity associated with the continuation,
69 // return it if the type is the same, return the next available thread of "etype" if
70 // the type is different.
71 //
72 // Only assign new affinity when there is currently none.
73 TS_INLINE EThread *
assign_affinity_by_type(Continuation * cont,EventType etype)74 EventProcessor::assign_affinity_by_type(Continuation *cont, EventType etype)
75 {
76 EThread *ethread = cont->mutex->thread_holding;
77 if (!ethread->is_event_type(etype)) {
78 ethread = cont->getThreadAffinity();
79 if (ethread == nullptr || !ethread->is_event_type(etype)) {
80 ethread = assign_thread(etype);
81 }
82 }
83
84 if (cont->getThreadAffinity() == nullptr) {
85 cont->setThreadAffinity(ethread);
86 }
87
88 return ethread;
89 }
90
TS_INLINE Event *
EventProcessor::schedule(Event *e, EventType etype)
{
  // Assign @a e to a thread of @a etype and enqueue it; returns @a e, or
  // nullptr if the event system is shutting down.
  ink_assert(etype < MAX_EVENT_TYPES);

  // Refuse new work during shutdown; the event is not enqueued.
  // NOTE(review): @a e is not released on this path — presumably the caller
  // or allocator handles the abandoned Event; confirm.
  if (TSSystemState::is_event_system_shut_down()) {
    return nullptr;
  }

  // Thread selection, in priority order:
  //   1. the continuation's affinity thread, if it services @a etype;
  //   2. the current thread, if it services @a etype;
  //   3. a round-robin pick from the etype group (assign_thread).
  // A continuation with no affinity is pinned to the chosen thread.
  EThread *affinity_thread = e->continuation->getThreadAffinity();
  EThread *curr_thread = this_ethread();
  if (affinity_thread != nullptr && affinity_thread->is_event_type(etype)) {
    e->ethread = affinity_thread;
  } else {
    // Is the current thread eligible?
    if (curr_thread != nullptr && curr_thread->is_event_type(etype)) {
      e->ethread = curr_thread;
    } else {
      e->ethread = assign_thread(etype);
    }
    if (affinity_thread == nullptr) {
      e->continuation->setThreadAffinity(e->ethread);
    }
  }

  // The event runs under the continuation's mutex when it has one.
  if (e->continuation->mutex) {
    e->mutex = e->continuation->mutex;
  }

  // Use the local enqueue variant when the target is the current thread
  // (avoids the cross-thread path); otherwise the regular enqueue.
  if (curr_thread != nullptr && e->ethread == curr_thread) {
    e->ethread->EventQueueExternal.enqueue_local(e);
  } else {
    e->ethread->EventQueueExternal.enqueue(e);
  }

  return e;
}
128
129 TS_INLINE Event *
schedule_imm(Continuation * cont,EventType et,int callback_event,void * cookie)130 EventProcessor::schedule_imm(Continuation *cont, EventType et, int callback_event, void *cookie)
131 {
132 Event *e = eventAllocator.alloc();
133
134 ink_assert(et < MAX_EVENT_TYPES);
135 #ifdef ENABLE_TIME_TRACE
136 e->start_time = Thread::get_hrtime();
137 #endif
138 e->callback_event = callback_event;
139 e->cookie = cookie;
140 return schedule(e->init(cont, 0, 0), et);
141 }
142
143 TS_INLINE Event *
schedule_at(Continuation * cont,ink_hrtime t,EventType et,int callback_event,void * cookie)144 EventProcessor::schedule_at(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie)
145 {
146 Event *e = eventAllocator.alloc();
147
148 ink_assert(t > 0);
149 ink_assert(et < MAX_EVENT_TYPES);
150 e->callback_event = callback_event;
151 e->cookie = cookie;
152 return schedule(e->init(cont, t, 0), et);
153 }
154
155 TS_INLINE Event *
schedule_in(Continuation * cont,ink_hrtime t,EventType et,int callback_event,void * cookie)156 EventProcessor::schedule_in(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie)
157 {
158 Event *e = eventAllocator.alloc();
159
160 ink_assert(et < MAX_EVENT_TYPES);
161 e->callback_event = callback_event;
162 e->cookie = cookie;
163 return schedule(e->init(cont, Thread::get_hrtime() + t, 0), et);
164 }
165
166 TS_INLINE Event *
schedule_every(Continuation * cont,ink_hrtime t,EventType et,int callback_event,void * cookie)167 EventProcessor::schedule_every(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie)
168 {
169 Event *e = eventAllocator.alloc();
170
171 ink_assert(t != 0);
172 ink_assert(et < MAX_EVENT_TYPES);
173 e->callback_event = callback_event;
174 e->cookie = cookie;
175 if (t < 0) {
176 return schedule(e->init(cont, t, t), et);
177 } else {
178 return schedule(e->init(cont, Thread::get_hrtime() + t, t), et);
179 }
180 }
181