1 /*****************************************************************************\
2 * slurm_errno.c - error codes and functions for slurm
3 ******************************************************************************
4 * Copyright (C) 2002-2007 The Regents of the University of California.
5 * Copyright (C) 2008-2010 Lawrence Livermore National Security.
6 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
7 * Written by Jim Garlick <garlick@llnl.gov>, et. al.
8 * CODE-OCEC-09-009. All rights reserved.
9 *
10 * This file is part of Slurm, a resource management program.
11 * For details, see <https://slurm.schedmd.com/>.
12 * Please also read the included file: DISCLAIMER.
13 *
14 * Slurm is free software; you can redistribute it and/or modify it under
15 * the terms of the GNU General Public License as published by the Free
16 * Software Foundation; either version 2 of the License, or (at your option)
17 * any later version.
18 *
19 * In addition, as a special exception, the copyright holders give permission
20 * to link the code of portions of this program with the OpenSSL library under
21 * certain conditions as described in each individual source file, and
22 * distribute linked combinations including the two. You must obey the GNU
23 * General Public License in all respects for all of the code used other than
24 * OpenSSL. If you modify file(s) with this exception, you may extend this
25 * exception to your version of the file(s), but you are not obligated to do
26 * so. If you do not wish to do so, delete this exception statement from your
27 * version. If you delete this exception statement from all source files in
28 * the program, then also delete it here.
29 *
30 * Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
31 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
32 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
33 * details.
34 *
35 * You should have received a copy of the GNU General Public License along
36 * with Slurm; if not, write to the Free Software Foundation, Inc.,
37 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
38 \*****************************************************************************/
39
40 /* This implementation relies on "overloading" the libc errno by
41 * partitioning its domain into system (<1000) and Slurm (>=1000) values.
42 * Slurm API functions should call slurm_seterrno() to set errno to a value.
43 * API users should call slurm_strerror() to convert all errno values to
44 * their description strings.
45 */
46
47 #include "config.h"
48
49 #include <errno.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53
54 #include "slurm/slurm_errno.h"
55
56 #include "src/common/switch.h"
57
/* Type for error string table entries */
typedef struct {
	int xe_number;		/* error code: errno value or Slurm-specific (>=1000) */
	char *xe_message;	/* human-readable description of the error */
} slurm_errtab_t;
63
/*
 * Add new error values to slurm/slurm_errno.h, and their descriptions
 * to this table. Entries need not be sorted; lookup is a linear scan
 * performed by _lookup_slurm_api_errtab() below.
 */
static slurm_errtab_t slurm_errtab[] = {
	{0, "No error"},
	{-1, "Unspecified error"},
	{EINPROGRESS, "Operation now in progress"},

	/* General Message error codes */
	{ SLURM_UNEXPECTED_MSG_ERROR,
	  "Unexpected message received" },
	{ SLURM_COMMUNICATIONS_CONNECTION_ERROR,
	  "Communication connection failure" },
	{ SLURM_COMMUNICATIONS_SEND_ERROR,
	  "Message send failure" },
	{ SLURM_COMMUNICATIONS_RECEIVE_ERROR,
	  "Message receive failure" },
	{ SLURM_COMMUNICATIONS_SHUTDOWN_ERROR,
	  "Communication shutdown failure" },
	{ SLURM_PROTOCOL_VERSION_ERROR,
	  "Incompatible versions of client and server code" },
	{ SLURM_PROTOCOL_IO_STREAM_VERSION_ERROR,
	  "I/O stream version number error" },
	{ SLURM_PROTOCOL_AUTHENTICATION_ERROR,
	  "Protocol authentication error" },
	{ SLURM_PROTOCOL_INSANE_MSG_LENGTH,
	  "Insane message length" },
	{ SLURM_MPI_PLUGIN_NAME_INVALID,
	  "Invalid MPI plugin name" },
	{ SLURM_MPI_PLUGIN_PRELAUNCH_SETUP_FAILED,
	  "MPI plugin's pre-launch setup failed" },
	{ SLURM_PLUGIN_NAME_INVALID,
	  "Plugin initialization failed" },
	{ SLURM_UNKNOWN_FORWARD_ADDR,
	  "Can't find an address, check slurm.conf" },

	/* communication failures to/from slurmctld */
	{ SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR,
	  "Unable to contact slurm controller (connect failure)" },
	{ SLURMCTLD_COMMUNICATIONS_SEND_ERROR,
	  "Unable to contact slurm controller (send failure)" },
	{ SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR,
	  "Unable to contact slurm controller (receive failure)" },
	{ SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR,
	  "Unable to contact slurm controller (shutdown failure)"},

	/* _info.c/communication layer RESPONSE_SLURM_RC message codes */

	{ SLURM_NO_CHANGE_IN_DATA,	/* Not really an error */
	  "Data has not changed since time specified" },

	/* slurmctld error codes */

	{ ESLURM_INVALID_PARTITION_NAME,
	  "Invalid partition name specified" },
	{ ESLURM_DEFAULT_PARTITION_NOT_SET,
	  "No partition specified or system default partition" },
	{ ESLURM_ACCESS_DENIED,
	  "Access/permission denied" },
	{ ESLURM_JOB_MISSING_REQUIRED_PARTITION_GROUP,
	  "User's group not permitted to use this partition" },
	{ ESLURM_REQUESTED_NODES_NOT_IN_PARTITION,
	  "Requested nodes not in this partition" },
	{ ESLURM_TOO_MANY_REQUESTED_CPUS,
	  "More processors requested than permitted" },
	{ ESLURM_INVALID_NODE_COUNT,
	  "Node count specification invalid" },
	{ ESLURM_ERROR_ON_DESC_TO_RECORD_COPY,
	  "Unable to create job record, try again" },
	{ ESLURM_JOB_MISSING_SIZE_SPECIFICATION,
	  "Job size specification needs to be provided" },
	{ ESLURM_JOB_SCRIPT_MISSING,
	  "Job script not specified" },
	{ ESLURM_USER_ID_MISSING,
	  "Invalid user id" },
	{ ESLURM_DUPLICATE_JOB_ID,
	  "Duplicate job id" },
	{ ESLURM_PATHNAME_TOO_LONG,
	  "Pathname of a file, directory or other parameter too long" },
	{ ESLURM_NOT_TOP_PRIORITY,
	  "Immediate execution impossible, insufficient priority" },
	{ ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE,
	  "Requested node configuration is not available" },
	{ ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE,
	  "Requested partition configuration not available now" },
	{ ESLURM_NODES_BUSY,
	  "Requested nodes are busy" },
	{ ESLURM_INVALID_JOB_ID,
	  "Invalid job id specified" },
	{ ESLURM_INVALID_NODE_NAME,
	  "Invalid node name specified" },
	{ ESLURM_INVALID_CORE_CNT,
	  "Core count for reservation node list is not consistent!" },
	{ ESLURM_WRITING_TO_FILE,
	  "I/O error writing script/environment to file" },
	{ ESLURM_TRANSITION_STATE_NO_UPDATE,
	  "Job can not be altered now, try again later" },
	{ ESLURM_ALREADY_DONE,
	  "Job/step already completing or completed" },
	{ ESLURM_INTERCONNECT_FAILURE,
	  "Error configuring interconnect" },
	{ ESLURM_BAD_DIST,
	  "Task distribution specification invalid" },
	{ ESLURM_JOB_PENDING,
	  "Job is pending execution" },
	{ ESLURM_BAD_TASK_COUNT,
	  "Task count specification invalid" },
	{ ESLURM_INVALID_JOB_CREDENTIAL,
	  "Error generating job credential" },
	{ ESLURM_IN_STANDBY_MODE,
	  "Slurm backup controller in standby mode" },
	{ ESLURM_INVALID_NODE_STATE,
	  "Invalid node state specified" },
	{ ESLURM_INVALID_FEATURE,
	  "Invalid feature specification" },
	{ ESLURM_INVALID_AUTHTYPE_CHANGE,
	  "AuthType change requires restart of all Slurm daemons and "
	  "commands to take effect"},
	{ ESLURM_ACTIVE_FEATURE_NOT_SUBSET,
	  "Active features not subset of available features" },
	{ ESLURM_INVALID_CRED_TYPE_CHANGE,
	  "CredType change requires restart of all Slurm daemons "
	  "to take effect" },
	{ ESLURM_INVALID_SCHEDTYPE_CHANGE,
	  "SchedulerType change requires restart of the slurmctld daemon "
	  "to take effect" },
	{ ESLURM_INVALID_SELECTTYPE_CHANGE,
	  "SelectType change requires restart of the slurmctld daemon "
	  "to take effect" },
	{ ESLURM_INVALID_SWITCHTYPE_CHANGE,
	  "SwitchType change requires restart of all Slurm daemons and "
	  "jobs to take effect" },
	{ ESLURM_FRAGMENTATION,
	  "Immediate execution impossible, "
	  "resources too fragmented for allocation" },
	{ ESLURM_NOT_SUPPORTED,
	  "Requested operation not supported on this system" },
	{ ESLURM_DISABLED,
	  "Requested operation is presently disabled" },
	{ ESLURM_DEPENDENCY,
	  "Job dependency problem" },
	{ ESLURM_BATCH_ONLY,
	  "Only batch jobs are accepted or processed" },
	{ ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED,
	  "Current SwitchType does not permit arbitrary task distribution"},
	{ ESLURM_TASKDIST_REQUIRES_OVERCOMMIT,
	  "Requested more tasks than available processors" },
	{ ESLURM_JOB_HELD,
	  "Job is in held state, pending scheduler release" },
	{ ESLURM_INVALID_TASK_MEMORY,
	  "Memory required by task is not available" },
	{ ESLURM_INVALID_ACCOUNT,
	  "Invalid account or account/partition combination specified"},
	{ ESLURM_INVALID_PARENT_ACCOUNT,
	  "Invalid parent account specified" },
	{ ESLURM_SAME_PARENT_ACCOUNT,
	  "Account already child of parent account specified" },
	{ ESLURM_INVALID_QOS,
	  "Invalid qos specification" },
	{ ESLURM_INVALID_WCKEY,
	  "Invalid wckey specification" },
	{ ESLURM_INVALID_LICENSES,
	  "Invalid license specification" },
	{ ESLURM_NEED_RESTART,
	  "The node configuration changes that were made require restart "
	  "of the slurmctld daemon to take effect"},
	{ ESLURM_ACCOUNTING_POLICY,
	  "Job violates accounting/QOS policy (job submit limit, user's "
	  "size and/or time limits)"},
	{ ESLURM_INVALID_TIME_LIMIT,
	  "Requested time limit is invalid (missing or exceeds some limit)"},
	{ ESLURM_RESERVATION_ACCESS,
	  "Access denied to requested reservation" },
	{ ESLURM_RESERVATION_INVALID,
	  "Requested reservation is invalid" },
	{ ESLURM_INVALID_TIME_VALUE,
	  "Invalid time specified" },
	{ ESLURM_RESERVATION_BUSY,
	  "Requested reservation is in use" },
	{ ESLURM_RESERVATION_NOT_USABLE,
	  "Requested reservation not usable now" },
	{ ESLURM_RESERVATION_OVERLAP,
	  "Requested reservation overlaps with another reservation" },
	{ ESLURM_PORTS_BUSY,
	  "Required ports are in use" },
	{ ESLURM_PORTS_INVALID,
	  "Requires more ports than can be reserved" },
	{ ESLURM_PROLOG_RUNNING,
	  "PrologSlurmctld is still running" },
	{ ESLURM_NO_STEPS,
	  "Job steps can not be run on this cluster" },
	{ ESLURM_INVALID_BLOCK_STATE,
	  "Invalid block state specified" },
	{ ESLURM_INVALID_BLOCK_LAYOUT,
	  "Functionality not available with current block layout mode"},
	{ ESLURM_INVALID_BLOCK_NAME,
	  "Invalid block name specified" },
	{ ESLURM_QOS_PREEMPTION_LOOP,
	  "QOS Preemption loop detected" },
	{ ESLURM_NODE_NOT_AVAIL,
	  "Required node not available (down, drained or reserved)"},
	{ ESLURM_INVALID_CPU_COUNT,
	  "CPU count specification invalid" },
	{ ESLURM_PARTITION_NOT_AVAIL,
	  "Required partition not available (inactive or drain)"},
	{ ESLURM_CIRCULAR_DEPENDENCY,
	  "Circular job dependency" },
	{ ESLURM_INVALID_GRES,
	  "Invalid generic resource (gres) specification" },
	{ ESLURM_JOB_NOT_PENDING,
	  "Job is no longer pending execution" },
	{ ESLURM_QOS_THRES,
	  "Requested account has breached requested QOS usage threshold"},
	{ ESLURM_PARTITION_IN_USE,
	  "Partition is in use" },
	{ ESLURM_STEP_LIMIT,
	  "Step limit reached for this job" },
	{ ESLURM_JOB_SUSPENDED,
	  "Job is current suspended, requested operation disabled" },
	{ ESLURM_CAN_NOT_START_IMMEDIATELY,
	  "Job can not start immediately" },
	{ ESLURM_INTERCONNECT_BUSY,
	  "Switch resources currently not available" },
	{ ESLURM_RESERVATION_EMPTY,
	  "Reservation request lacks users or accounts" },
	{ ESLURM_INVALID_ARRAY,
	  "Invalid job array specification" },
	{ ESLURM_RESERVATION_NAME_DUP,
	  "Duplicate reservation name" },
	{ ESLURM_JOB_STARTED,
	  "Job has already started" },
	{ ESLURM_JOB_FINISHED,
	  "Job has already finished" },
	{ ESLURM_JOB_NOT_RUNNING,
	  "Job is not running"},
	{ ESLURM_JOB_NOT_PENDING_NOR_RUNNING,
	  "Job is not pending nor running" },
	{ ESLURM_JOB_NOT_SUSPENDED,
	  "Job is not suspended" },
	{ ESLURM_JOB_NOT_FINISHED,
	  "Job is not finished" },
	{ ESLURM_TRIGGER_DUP,
	  "Duplicate event trigger" },
	{ ESLURM_INTERNAL,
	  "Slurm internal error, contact system administrator" },
	{ ESLURM_INVALID_BURST_BUFFER_CHANGE,
	  "BurstBufferType change requires restart of slurmctld daemon "
	  "to take effect"},
	{ ESLURM_BURST_BUFFER_PERMISSION,
	  "Burst Buffer permission denied" },
	{ ESLURM_BURST_BUFFER_LIMIT,
	  "Burst Buffer resource limit exceeded" },
	{ ESLURM_INVALID_BURST_BUFFER_REQUEST,
	  "Burst Buffer request invalid" },
	{ ESLURM_PRIO_RESET_FAIL,
	  "Changes to job priority are not persistent, change nice instead" },
	{ ESLURM_POWER_NOT_AVAIL,
	  "Required power not available now" },
	{ ESLURM_POWER_RESERVED,
	  "Required power at least partially reserved" },
	{ ESLURM_INVALID_POWERCAP,
	  "Required powercap is not valid, check min/max values"},
	{ ESLURM_INVALID_MCS_LABEL,
	  "Invalid mcs_label specified" },
	{ ESLURM_BURST_BUFFER_WAIT,
	  "Waiting for burst buffer" },
	{ ESLURM_PARTITION_DOWN,
	  "Partition in DOWN state" },
	{ ESLURM_DUPLICATE_GRES,
	  "Duplicate generic resource (gres) specification" },
	{ ESLURM_JOB_SETTING_DB_INX,
	  "Job update not available right now, the DB index is being set, try again in a bit" },
	{ ESLURM_RSV_ALREADY_STARTED,
	  "Reservation already started" },
	{ ESLURM_SUBMISSIONS_DISABLED,
	  "System submissions disabled" },
	{ ESLURM_NOT_HET_JOB,
	  "Job not heterogeneous job" },
	{ ESLURM_NOT_HET_JOB_LEADER,
	  "Job not heterogeneous job leader" },
	{ ESLURM_NOT_WHOLE_HET_JOB,
	  "Operation not permitted on individual component of heterogeneous job" },
	{ ESLURM_CORE_RESERVATION_UPDATE,
	  "Core-based reservation can not be updated" },
	{ ESLURM_DUPLICATE_STEP_ID,
	  "Duplicate job step id" },
	{ ESLURM_X11_NOT_AVAIL,
	  "X11 forwarding not available" },
	{ ESLURM_GROUP_ID_MISSING,
	  "Invalid group id" },
	{ ESLURM_BATCH_CONSTRAINT,
	  "Job --batch option is invalid or not a subset of --constraints" },
	{ ESLURM_INVALID_TRES,
	  "Invalid Trackable RESource (TRES) specification" },
	{ ESLURM_INVALID_TRES_BILLING_WEIGHTS,
	  "Invalid TRESBillingWeights specification" },
	{ ESLURM_INVALID_JOB_DEFAULTS,
	  "Invalid JobDefaults specification" },
	{ ESLURM_RESERVATION_MAINT,
	  "Job can not start due to maintenance reservation." },
	{ ESLURM_INVALID_GRES_TYPE,
	  "Invalid GRES specification (with and without type identification)" },
	{ ESLURM_REBOOT_IN_PROGRESS,
	  "Reboot already in progress" },
	{ ESLURM_MULTI_KNL_CONSTRAINT,
	  "Multiple KNL NUMA and/or MCDRAM constraints require use of a heterogeneous job" },
	{ ESLURM_UNSUPPORTED_GRES,
	  "Requested GRES option unsupported by configured SelectType plugin" },
	{ ESLURM_INVALID_NICE,
	  "Invalid --nice value" },
	{ ESLURM_INVALID_TIME_MIN_LIMIT,
	  "Invalid time-min specification (exceeds job's time or other limits)"},
	{ ESLURM_DEFER,
	  "Immediate execution impossible. "
	  "Individual job submission scheduling attempts deferred"},
	{ ESLURM_CONFIGLESS_DISABLED,
	  "ConfigLess mode is disabled" },
	{ ESLURM_ENVIRONMENT_MISSING,
	  "Environment is missing in job" },

	/* slurmd error codes */
	{ ESLURMD_PIPE_ERROR_ON_TASK_SPAWN,
	  "Pipe error on task spawn" },
	{ ESLURMD_KILL_TASK_FAILED,
	  "Kill task failed" },
	{ ESLURMD_UID_NOT_FOUND,
	  "User not found on host" },
	{ ESLURMD_GID_NOT_FOUND,
	  "Group ID not found on host" },
	{ ESLURMD_INVALID_ACCT_FREQ,
	  "Invalid accounting frequency requested" },
	{ ESLURMD_INVALID_JOB_CREDENTIAL,
	  "Invalid job credential" },
	{ ESLURMD_CREDENTIAL_REVOKED,
	  "Job credential revoked" },
	{ ESLURMD_CREDENTIAL_EXPIRED,
	  "Job credential expired" },
	{ ESLURMD_CREDENTIAL_REPLAYED,
	  "Job credential replayed" },
	{ ESLURMD_CREATE_BATCH_DIR_ERROR,
	  "Slurmd could not create a batch directory or file" },
	{ ESLURMD_MODIFY_BATCH_DIR_ERROR,
	  "Slurmd could not chown or chmod a batch directory" },
	{ ESLURMD_CREATE_BATCH_SCRIPT_ERROR,
	  "Slurmd could not create a batch script" },
	{ ESLURMD_MODIFY_BATCH_SCRIPT_ERROR,
	  "Slurmd could not chown or chmod a batch script" },
	{ ESLURMD_SETUP_ENVIRONMENT_ERROR,
	  "Slurmd could not set up environment for batch job" },
	{ ESLURMD_SHARED_MEMORY_ERROR,
	  "Slurmd shared memory error" },
	{ ESLURMD_SET_UID_OR_GID_ERROR,
	  "Slurmd could not set UID or GID" },
	{ ESLURMD_SET_SID_ERROR,
	  "Slurmd could not set session ID" },
	{ ESLURMD_CANNOT_SPAWN_IO_THREAD,
	  "Slurmd could not spawn I/O thread" },
	{ ESLURMD_FORK_FAILED,
	  "Slurmd could not fork job" },
	{ ESLURMD_EXECVE_FAILED,
	  "Slurmd could not execve job" },
	{ ESLURMD_IO_ERROR,
	  "Slurmd could not connect IO" },
	{ ESLURMD_PROLOG_FAILED,
	  "Job prolog failed" },
	{ ESLURMD_EPILOG_FAILED,
	  "Job epilog failed" },
	{ ESLURMD_SESSION_KILLED,
	  "Session manager killed" },
	{ ESLURMD_TOOMANYSTEPS,
	  "Too many job steps on node" },
	{ ESLURMD_STEP_EXISTS,
	  "Job step already exists" },
	{ ESLURMD_JOB_NOTRUNNING,
	  "Job step not running" },
	{ ESLURMD_STEP_SUSPENDED,
	  "Job step is suspended" },
	{ ESLURMD_STEP_NOTSUSPENDED,
	  "Job step is not currently suspended" },
	{ ESLURMD_INVALID_SOCKET_NAME_LEN,
	  "Unix socket name exceeded maximum length" },

	/* slurmd errors in user batch job */
	{ ESCRIPT_CHDIR_FAILED,
	  "unable to change directory to work directory" },
	{ ESCRIPT_OPEN_OUTPUT_FAILED,
	  "could not open output file" },
	{ ESCRIPT_NON_ZERO_RETURN,
	  "Script terminated with non-zero exit code" },


	/* socket specific Slurm communications error */

	{ SLURM_PROTOCOL_SOCKET_IMPL_ZERO_RECV_LENGTH,
	  "Received zero length message" },
	{ SLURM_PROTOCOL_SOCKET_IMPL_NEGATIVE_RECV_LENGTH,
	  "Received message length < 0" },
	{ SLURM_PROTOCOL_SOCKET_IMPL_NOT_ALL_DATA_SENT,
	  "Failed to send entire message" },
	{ ESLURM_PROTOCOL_INCOMPLETE_PACKET,
	  "Header lengths are longer than data received" },
	{ SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT,
	  "Socket timed out on send/recv operation" },
	{ SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT,
	  "Zero Bytes were transmitted or received" },

	/* slurm_auth errors */

	{ ESLURM_AUTH_CRED_INVALID,
	  "Invalid authentication credential" },
	{ ESLURM_AUTH_FOPEN_ERROR,
	  "Failed to open authentication public key" },
	{ ESLURM_AUTH_NET_ERROR,
	  "Failed to connect to authentication agent" },
	{ ESLURM_AUTH_BADARG,
	  "Bad argument to plugin function" },
	{ ESLURM_AUTH_MEMORY,
	  "Memory management error" },
	{ ESLURM_AUTH_INVALID,
	  "Authentication credential invalid" },
	{ ESLURM_AUTH_UNPACK,
	  "Cannot unpack credential" },

	/* accounting errors */
	{ ESLURM_DB_CONNECTION,
	  "Unable to connect to database" },
	{ ESLURM_JOBS_RUNNING_ON_ASSOC,
	  "Job(s) active, cancel job(s) before remove" },
	{ ESLURM_CLUSTER_DELETED,
	  "Cluster deleted, commit/rollback immediately" },
	{ ESLURM_ONE_CHANGE,
	  "Can only change one at a time" },
	{ ESLURM_BAD_NAME,
	  "Unacceptable name given. (No '.' in name allowed)" },
	{ ESLURM_OVER_ALLOCATE,
	  "You can not allocate more than 100% of a resource" },
	{ ESLURM_RESULT_TOO_LARGE,
	  "Query result exceeds size limit" },
	{ ESLURM_DB_QUERY_TOO_WIDE,
	  "Too wide of a date range in query" },

	/* Federation Errors */
	{ ESLURM_FED_CLUSTER_MAX_CNT,
	  "Too many clusters in federation" },
	{ ESLURM_FED_CLUSTER_MULTIPLE_ASSIGNMENT,
	  "Clusters can only be assigned to one federation" },
	{ ESLURM_INVALID_CLUSTER_FEATURE,
	  "Invalid cluster feature specification" },
	{ ESLURM_JOB_NOT_FEDERATED,
	  "Not a valid federated job" },
	{ ESLURM_INVALID_CLUSTER_NAME,
	  "Invalid cluster name" },
	{ ESLURM_FED_JOB_LOCK,
	  "Job locked by another sibling" },
	{ ESLURM_FED_NO_VALID_CLUSTERS,
	  "No eligible clusters for federated job" },

	/* plugin and custom errors */
	{ ESLURM_MISSING_TIME_LIMIT,
	  "Time limit specification required, but not provided" },
	{ ESLURM_INVALID_KNL,
	  "Invalid KNL configuration (MCDRAM or NUMA option)" }
};
525
526 /*
527 * Linear search through table of errno values and strings,
528 * returns NULL on error, string on success.
529 */
_lookup_slurm_api_errtab(int errnum)530 static char *_lookup_slurm_api_errtab(int errnum)
531 {
532 char *res = NULL;
533 int i;
534
535 for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) {
536 if (slurm_errtab[i].xe_number == errnum) {
537 res = slurm_errtab[i].xe_message;
538 break;
539 }
540 }
541
542 return res;
543 }
544
545 /*
546 * Return string associated with error (Slurm or system).
547 * Always returns a valid string (because strerror always does).
548 */
slurm_strerror(int errnum)549 char *slurm_strerror(int errnum)
550 {
551 char *res = _lookup_slurm_api_errtab(errnum);
552 if (res)
553 return res;
554 else if (errnum > 0)
555 return strerror(errnum);
556 else
557 return "Unknown negative error number";
558 }
559
560 /*
561 * Get errno
562 */
slurm_get_errno(void)563 int slurm_get_errno(void)
564 {
565 return errno;
566 }
567
568 /*
569 * Set errno to the specified value.
570 */
slurm_seterrno(int errnum)571 void slurm_seterrno(int errnum)
572 {
573 #ifdef __set_errno
574 __set_errno(errnum);
575 #else
576 errno = errnum;
577 #endif
578 }
579
580 /*
581 * Print "message: error description" on stderr for current errno value.
582 */
slurm_perror(const char * msg)583 void slurm_perror(const char *msg)
584 {
585 fprintf(stderr, "%s: %s\n", msg, slurm_strerror(errno));
586 }
587