1 /*
2 * Copyright (c) 2014-2017, Siemens AG. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
18 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24 * POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <embb/mtapi/c/mtapi.h>
28 #include <embb/base/c/core_set.h>
29
30 #include <mtapi_status_t.h>
31 #include <embb_mtapi_alloc.h>
32 #include <embb_mtapi_job_t.h>
33 #include <embb_mtapi_log.h>
34 #include <embb_mtapi_node_t.h>
35 #include <embb_mtapi_action_t.h>
36 #include <embb_mtapi_group_t.h>
37 #include <embb_mtapi_task_t.h>
38 #include <embb_mtapi_queue_t.h>
39 #include <embb_mtapi_scheduler_t.h>
40 #include <embb_mtapi_attr.h>
41
42 #include <embb/base/c/internal/cmake_config.h>
43
44
/* Singleton node instance for this process; non-NULL exactly while the
   node is initialized (set by mtapi_initialize, cleared by mtapi_finalize). */
static embb_mtapi_node_t* embb_mtapi_node_instance = NULL;
46
47 /* ---- CLASS MEMBERS ------------------------------------------------------ */
48
embb_mtapi_node_is_initialized()49 mtapi_boolean_t embb_mtapi_node_is_initialized() {
50 return (mtapi_boolean_t)(embb_mtapi_node_instance != NULL);
51 }
52
embb_mtapi_node_get_instance()53 embb_mtapi_node_t* embb_mtapi_node_get_instance() {
54 return embb_mtapi_node_instance;
55 }
56
57
58 /* ---- INTERFACE FUNCTIONS ------------------------------------------------ */
59
/*
 * Initializes the local MTAPI node singleton.
 *
 * domain_id / node_id identify this node; attributes may be MTAPI_NULL,
 * in which case defaults are taken via mtapi_nodeattr_init(). On success
 * the information structure is copied to mtapi_info (if non-NULL) and the
 * scheduler workers are released.
 *
 * Status codes:
 *   MTAPI_SUCCESS              initialization succeeded
 *   MTAPI_ERR_NODE_INITIALIZED node was already initialized (info is
 *                              still returned in that case)
 *   MTAPI_ERR_NODE_INITFAILED  pool or scheduler construction failed
 *   MTAPI_ERR_PARAMETER        attribute/affinity initialization failed
 *   MTAPI_ERR_UNKNOWN          out of memory for the node structure
 */
void mtapi_initialize(
  MTAPI_IN mtapi_domain_t domain_id,
  MTAPI_IN mtapi_node_t node_id,
  MTAPI_IN mtapi_node_attributes_t* attributes,
  MTAPI_OUT mtapi_info_t* mtapi_info,
  MTAPI_OUT mtapi_status_t* status) {
  mtapi_status_t local_status = MTAPI_ERR_UNKNOWN;
  embb_mtapi_node_t* node;

  embb_mtapi_log_trace(
    "mtapi_initialize() called (domain: %i, node: %i)\n", domain_id, node_id);

  /* check if node was already initialized */
  if (embb_mtapi_node_is_initialized()) {
    local_status = MTAPI_ERR_NODE_INITIALIZED;

    node = embb_mtapi_node_instance;

    /* return previously set information structure */
    if (MTAPI_NULL != mtapi_info) {
      *mtapi_info = node->info;
    }
  } else {
    embb_mtapi_alloc_reset_bytes_allocated();
    /* create node instance */
    embb_mtapi_node_instance = (embb_mtapi_node_t*)
      embb_mtapi_alloc_allocate(sizeof(embb_mtapi_node_t));
    if (NULL == embb_mtapi_node_instance) {
      /* out of memory! */
      local_status = MTAPI_ERR_UNKNOWN;
    } else {
      node = embb_mtapi_node_instance;

      node->domain_id = domain_id;
      node->node_id = node_id;

      /* take caller-supplied attributes, otherwise the defaults */
      if (MTAPI_NULL != attributes) {
        node->attributes = *attributes;
        local_status = MTAPI_SUCCESS;
      } else {
        mtapi_nodeattr_init(&node->attributes, &local_status);
      }

      if (MTAPI_SUCCESS == local_status) {
        mtapi_affinity_init(&node->affinity_all, MTAPI_TRUE, &local_status);
      }

      if (MTAPI_SUCCESS == local_status) {
        embb_atomic_init_int(&node->is_scheduler_running, MTAPI_FALSE);

        /* initialize storage */
        embb_mtapi_job_initialize_list(node);
        node->action_pool = embb_mtapi_action_pool_new(
          node->attributes.max_actions);
        node->group_pool = embb_mtapi_group_pool_new(
          node->attributes.max_groups);
        node->task_pool = embb_mtapi_task_pool_new(
          node->attributes.max_tasks);
        node->queue_pool = embb_mtapi_queue_pool_new(
          node->attributes.max_queues);
        if (MTAPI_NULL == node->job_list ||
          MTAPI_NULL == node->action_pool ||
          MTAPI_NULL == node->group_pool ||
          MTAPI_NULL == node->task_pool ||
          MTAPI_NULL == node->queue_pool) {
          /* mtapi_finalize tears down whatever was constructed and
             clears the singleton */
          mtapi_finalize(MTAPI_NULL);
          local_status = MTAPI_ERR_NODE_INITFAILED;
        }

        if (local_status == MTAPI_SUCCESS) {
          /* initialize scheduler for local node */
          node->scheduler = embb_mtapi_scheduler_new();
          if (MTAPI_NULL != node->scheduler) {
            /* fill information structure */
            node->info.mtapi_version = 0x1000; // mtapi version 1.0
            node->info.organization_id = MCA_ORG_ID_EMB;
            node->info.implementation_version =
              EMBB_BASE_VERSION_MAJOR * 0x1000 + EMBB_BASE_VERSION_MINOR;
            node->info.number_of_domains = ~0u;
            node->info.number_of_nodes = ~0u;
            node->info.hardware_concurrency = embb_core_count_available();
            node->info.used_memory = embb_mtapi_alloc_get_bytes_allocated();
            if (MTAPI_NULL != mtapi_info) {
              *mtapi_info = node->info;
            }

            /* initialization succeeded, tell workers to start working */
            embb_atomic_store_int(&node->is_scheduler_running, MTAPI_TRUE);
          } else {
            mtapi_finalize(MTAPI_NULL);
            local_status = MTAPI_ERR_NODE_INITFAILED;
          }
        }
      } else {
        /* attribute/affinity initialization failed: release the partially
           constructed node AND clear the singleton, otherwise
           embb_mtapi_node_instance would keep pointing at freed memory and
           embb_mtapi_node_is_initialized() would wrongly report TRUE
           (dangling-pointer fix). */
        embb_mtapi_alloc_deallocate(node);
        embb_mtapi_node_instance = MTAPI_NULL;
        local_status = MTAPI_ERR_PARAMETER;
      }
    }
  }

  mtapi_status_set(status, local_status);
}
162
/*
 * Shuts down the local MTAPI node: stops the scheduler, releases all
 * pools in reverse construction order, and frees the node instance.
 * Sets MTAPI_ERR_NODE_NOTINIT if the node was never initialized.
 */
void mtapi_finalize(MTAPI_OUT mtapi_status_t* status) {
  embb_mtapi_node_t* node;

  embb_mtapi_log_trace("mtapi_finalize() called\n");

  if (!embb_mtapi_node_is_initialized()) {
    mtapi_status_set(status, MTAPI_ERR_NODE_NOTINIT);
    return;
  }

  node = embb_mtapi_node_get_instance();

  /* stop the scheduler first so no worker touches the pools below */
  if (MTAPI_NULL != node->scheduler) {
    embb_mtapi_scheduler_delete(node->scheduler);
    node->scheduler = MTAPI_NULL;
  }

  /* release storage in reverse order of construction */
  if (MTAPI_NULL != node->queue_pool) {
    embb_mtapi_queue_pool_delete(node->queue_pool);
    node->queue_pool = MTAPI_NULL;
  }
  if (MTAPI_NULL != node->task_pool) {
    embb_mtapi_task_pool_delete(node->task_pool);
    node->task_pool = MTAPI_NULL;
  }
  if (MTAPI_NULL != node->group_pool) {
    embb_mtapi_group_pool_delete(node->group_pool);
    node->group_pool = MTAPI_NULL;
  }
  if (MTAPI_NULL != node->action_pool) {
    embb_mtapi_action_pool_delete(node->action_pool);
    node->action_pool = MTAPI_NULL;
  }
  if (MTAPI_NULL != node->job_list) {
    embb_mtapi_job_finalize_list(node);
  }

  embb_atomic_destroy_int(&node->is_scheduler_running);

  /* free system instance and clear the singleton */
  embb_mtapi_alloc_deallocate(node);
  embb_mtapi_node_instance = MTAPI_NULL;

  mtapi_status_set(status, MTAPI_SUCCESS);
}
215
/*
 * Copies the requested node attribute into *attribute.
 * Fails with MTAPI_ERR_NODE_NOTINIT if the node is not initialized or
 * the given node id does not match, MTAPI_ERR_PARAMETER if attribute is
 * NULL, MTAPI_ERR_ATTR_NUM for an unknown attribute_num, and
 * MTAPI_ERR_ATTR_SIZE for a size mismatch.
 */
void mtapi_node_get_attribute(
  MTAPI_IN mtapi_node_t node,
  MTAPI_IN mtapi_uint_t attribute_num,
  MTAPI_OUT void* attribute,
  MTAPI_IN mtapi_size_t attribute_size,
  MTAPI_OUT mtapi_status_t* status) {
  mtapi_status_t result_status = MTAPI_ERR_UNKNOWN;
  embb_mtapi_node_t* instance = embb_mtapi_node_get_instance();

  embb_mtapi_log_trace("mtapi_node_get_attribute() called\n");

  /* guard chain; short-circuit keeps instance from being dereferenced
     when the node is not initialized */
  if (!embb_mtapi_node_is_initialized() || instance->node_id != node) {
    result_status = MTAPI_ERR_NODE_NOTINIT;
  } else if (MTAPI_NULL == attribute) {
    result_status = MTAPI_ERR_PARAMETER;
  } else {
    switch (attribute_num) {
    case MTAPI_NODE_CORE_AFFINITY:
      if (MTAPI_NODE_CORE_AFFINITY_SIZE == attribute_size) {
        *(embb_core_set_t*)attribute = instance->attributes.core_affinity;
        result_status = MTAPI_SUCCESS;
      } else {
        result_status = MTAPI_ERR_ATTR_SIZE;
      }
      break;

    case MTAPI_NODE_NUMCORES:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.num_cores, attribute, attribute_size);
      break;

    case MTAPI_NODE_TYPE:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.type, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_TASKS:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_tasks, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_ACTIONS:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_actions, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_GROUPS:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_groups, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_QUEUES:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_queues, attribute, attribute_size);
      break;

    case MTAPI_NODE_QUEUE_LIMIT:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.queue_limit, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_JOBS:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_jobs, attribute, attribute_size);
      break;

    case MTAPI_NODE_MAX_ACTIONS_PER_JOB:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_actions_per_job, attribute,
        attribute_size);
      break;

    case MTAPI_NODE_MAX_PRIORITIES:
      result_status = embb_mtapi_attr_get_mtapi_uint_t(
        &instance->attributes.max_priorities, attribute, attribute_size);
      break;

    default:
      result_status = MTAPI_ERR_ATTR_NUM;
      break;
    }
  }

  mtapi_status_set(status, result_status);
}
308
mtapi_domain_id_get(MTAPI_OUT mtapi_status_t * status)309 mtapi_domain_t mtapi_domain_id_get(
310 MTAPI_OUT mtapi_status_t* status
311 ) {
312 mtapi_status_t local_status = MTAPI_ERR_UNKNOWN;
313 embb_mtapi_node_t* node = embb_mtapi_node_get_instance();
314 mtapi_domain_t domain_id = MTAPI_DOMAIN_ID_INVALID;
315
316 embb_mtapi_log_trace("mtapi_domain_id_get() called\n");
317
318 if (embb_mtapi_node_is_initialized()) {
319 domain_id = node->domain_id;
320 local_status = MTAPI_SUCCESS;
321 } else {
322 local_status = MTAPI_ERR_NODE_NOTINIT;
323 }
324
325 mtapi_status_set(status, local_status);
326 return domain_id;
327 }
328
mtapi_node_id_get(MTAPI_OUT mtapi_status_t * status)329 mtapi_node_t mtapi_node_id_get(
330 MTAPI_OUT mtapi_status_t* status) {
331 mtapi_status_t local_status = MTAPI_ERR_UNKNOWN;
332 embb_mtapi_node_t* node = embb_mtapi_node_get_instance();
333 mtapi_node_t node_id = MTAPI_NODE_ID_INVALID;
334
335 embb_mtapi_log_trace("mtapi_domain_id_get() called\n");
336
337 if (embb_mtapi_node_is_initialized()) {
338 node_id = node->node_id;
339 local_status = MTAPI_SUCCESS;
340 } else {
341 local_status = MTAPI_ERR_NODE_NOTINIT;
342 }
343
344 mtapi_status_set(status, local_status);
345 return node_id;
346 }
347