1 /*****************************************************************************\
2  *  read_config.c - read the overall slurm configuration file
3  *****************************************************************************
4  *  Copyright (C) 2002-2007 The Regents of the University of California.
5  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
6  *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
7  *  Portions Copyright (C) 2010-2016 SchedMD <https://www.schedmd.com>.
8  *  Portions (boards) copyright (C) 2012 Bull, <rod.schultz@bull.com>
9  *  Portions (route) copyright (C) 2014 Bull, <rod.schultz@bull.com>
10  *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
11  *  Copyright (C) 2013 Intel, Inc.
12  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
13  *  Written by Morris Jette <jette1@llnl.gov>.
14  *  CODE-OCEC-09-009. All rights reserved.
15  *
16  *  This file is part of Slurm, a resource management program.
17  *  For details, see <https://slurm.schedmd.com/>.
18  *  Please also read the included file: DISCLAIMER.
19  *
20  *  Slurm is free software; you can redistribute it and/or modify it under
21  *  the terms of the GNU General Public License as published by the Free
22  *  Software Foundation; either version 2 of the License, or (at your option)
23  *  any later version.
24  *
25  *  In addition, as a special exception, the copyright holders give permission
26  *  to link the code of portions of this program with the OpenSSL library under
27  *  certain conditions as described in each individual source file, and
28  *  distribute linked combinations including the two. You must obey the GNU
29  *  General Public License in all respects for all of the code used other than
30  *  OpenSSL. If you modify file(s) with this exception, you may extend this
31  *  exception to your version of the file(s), but you are not obligated to do
32  *  so. If you do not wish to do so, delete this exception statement from your
33  *  version.  If you delete this exception statement from all source files in
34  *  the program, then also delete it here.
35  *
36  *  Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
37  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
38  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
39  *  details.
40  *
41  *  You should have received a copy of the GNU General Public License along
42  *  with Slurm; if not, write to the Free Software Foundation, Inc.,
43  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
44 \*****************************************************************************/
45 
46 #include "config.h"
47 
48 #include <arpa/inet.h>
49 #include <assert.h>
50 #include <ctype.h>
51 #include <errno.h>
52 #include <limits.h>
53 #include <netdb.h>
54 #include <netinet/in.h>
55 #include <pthread.h>
56 #include <pwd.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <string.h>
60 #include <sys/socket.h>
61 #include <sys/stat.h>
62 #include <sys/types.h>
63 #include <time.h>
64 #include <unistd.h>
65 
66 #include "slurm/slurm.h"
67 
68 #include "src/common/cpu_frequency.h"
69 #include "src/common/fetch_config.h"
70 #include "src/common/hostlist.h"
71 #include "src/common/list.h"
72 #include "src/common/log.h"
73 #include "src/common/macros.h"
74 #include "src/common/node_conf.h"
75 #include "src/common/node_features.h"
76 #include "src/common/parse_config.h"
77 #include "src/common/parse_time.h"
78 #include "src/common/proc_args.h"
79 #include "src/common/read_config.h"
80 #include "src/common/slurm_accounting_storage.h"
81 #include "src/common/slurm_protocol_api.h"
82 #include "src/common/slurm_protocol_defs.h"
83 #include "src/common/slurm_resource_info.h"
84 #include "src/common/slurm_resolv.h"
85 #include "src/common/slurm_rlimits_info.h"
86 #include "src/common/slurm_selecttype_info.h"
87 #include "src/common/strlcpy.h"
88 #include "src/common/uid.h"
89 #include "src/common/util-net.h"
90 #include "src/common/xmalloc.h"
91 #include "src/common/xstring.h"
92 
93 /*
94 ** Define slurm-specific aliases for use by plugins, see slurm_xlator.h
95 ** for details.
96  */
97 strong_alias(destroy_config_plugin_params, slurm_destroy_config_plugin_params);
98 strong_alias(destroy_config_key_pair, slurm_destroy_config_key_pair);
99 strong_alias(get_extra_conf_path, slurm_get_extra_conf_path);
100 strong_alias(sort_key_pairs, slurm_sort_key_pairs);
101 
102 /*
103  * Instantiation of the "extern slurm_ctl_conf_t slurmctld_conf" and
104  * "bool ignore_state_errors" found in slurmctld.h
105  */
106 slurm_ctl_conf_t slurmctld_conf;
107 bool ignore_state_errors = false;
108 
109 #ifndef NDEBUG
110 uint16_t drop_priv_flag = 0;
111 #endif
112 
113 static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
114 static s_p_hashtbl_t *conf_hashtbl = NULL;
115 static slurm_ctl_conf_t *conf_ptr = &slurmctld_conf;
116 static bool conf_initialized = false;
117 static s_p_hashtbl_t *default_frontend_tbl;
118 static s_p_hashtbl_t *default_nodename_tbl;
119 static s_p_hashtbl_t *default_partition_tbl;
120 static log_level_t lvl = LOG_LEVEL_FATAL;
121 static int	local_test_config_rc = SLURM_SUCCESS;
122 static bool     no_addr_cache = false;
123 static int plugstack_fd = -1;
124 static char *plugstack_conf = NULL;
125 static int topology_fd = -1;
126 static char *topology_conf = NULL;
127 
inline static void _normalize_debug_level(uint16_t *level);
static int _init_slurm_conf(const char *file_name);

#define NAME_HASH_LEN 512
/*
 * Node lookup record; each node is linked onto two hash chains, one
 * keyed by NodeName (alias) and one keyed by NodeHostname.
 */
typedef struct names_ll_s {
	char *alias;	/* NodeName */
	char *hostname;	/* NodeHostname */
	char *address;	/* NodeAddr */
	char *bcast_address; /* BcastAddress */
	uint16_t port;
	uint16_t cpus;
	uint16_t boards;
	uint16_t sockets;
	uint16_t cores;
	uint16_t threads;
	char *cpu_spec_list;
	uint16_t core_spec_cnt;
	uint64_t mem_spec_limit;
	slurm_addr_t addr;	/* resolved NodeAddr (see addr_initialized) */
	slurm_addr_t bcast_addr; /* resolved BcastAddress */
	bool addr_initialized;	/* presumably filled lazily on first use */
	bool bcast_addr_initialized;
	struct names_ll_s *next_alias;		/* hash-chain links */
	struct names_ll_s *next_hostname;
} names_ll_t;
static bool nodehash_initialized = false;
static names_ll_t *host_to_node_hashtbl[NAME_HASH_LEN] = {NULL};
static names_ll_t *node_to_host_hashtbl[NAME_HASH_LEN] = {NULL};

/* Hostname/address pair for a slurmctld server */
typedef struct slurm_conf_server {
	char *hostname;
	char *addr;
} slurm_conf_server_t;

/* Parser callbacks and helpers defined later in this file */
static void _destroy_nodename(void *ptr);
static int _parse_frontend(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover);
static int _parse_nodename(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover);
static bool _is_valid_path(char *path, char *msg);
static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
				const char *key, const char *value,
				const char *line, char **leftover);
static void _destroy_partitionname(void *ptr);
static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
			    const char *key, const char *value,
			    const char *line, char **leftover);
static void _destroy_downnodes(void *ptr);
static int _parse_nodeset(void **dest, slurm_parser_enum_t type,
			  const char *key, const char *value,
			  const char *line, char **leftover);
static void _destroy_nodeset(void *ptr);

static int _load_slurmctld_host(slurm_ctl_conf_t *conf);
static int _parse_slurmctld_host(void **dest, slurm_parser_enum_t type,
				 const char *key, const char *value,
				 const char *line, char **leftover);
static void _destroy_slurmctld_host(void *ptr);

static int _defunct_option(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover);
static int _validate_and_set_defaults(slurm_ctl_conf_t *conf,
				      s_p_hashtbl_t *hashtbl);
static uint16_t *_parse_srun_ports(const char *);
195 
/*
 * Recognized top-level slurm.conf keywords and the s_p type each parses
 * to.  Simple entries are scalar lookups; entries carrying a handler and
 * destructor (the S_P_ARRAY group at the bottom) produce one record per
 * configuration line.  See slurm.conf(5) for keyword semantics.
 */
s_p_options_t slurm_conf_options[] = {
	{"AccountingStorageTRES", S_P_STRING},
	{"AccountingStorageEnforce", S_P_STRING},
	{"AccountingStorageExternalHost", S_P_STRING},
	{"AccountingStorageHost", S_P_STRING},
	{"AccountingStorageBackupHost", S_P_STRING},
	{"AccountingStorageLoc", S_P_STRING},
	{"AccountingStoragePass", S_P_STRING},
	{"AccountingStoragePort", S_P_UINT32},
	{"AccountingStorageType", S_P_STRING},
	{"AccountingStorageUser", S_P_STRING},
	{"AccountingStoreJobComment", S_P_BOOLEAN},
	{"AcctGatherEnergyType", S_P_STRING},
	{"AcctGatherNodeFreq", S_P_UINT16},
	{"AcctGatherProfileType", S_P_STRING},
	{"AcctGatherInterconnectType", S_P_STRING},
	{"AcctGatherInfinibandType", S_P_STRING},
	{"AcctGatherFilesystemType", S_P_STRING},
	{"AllowSpecResourcesUsage", S_P_BOOLEAN},
	{"AuthAltTypes", S_P_STRING},
	{"AuthInfo", S_P_STRING},
	{"AuthType", S_P_STRING},
	{"BackupAddr", S_P_STRING},
	{"BackupController", S_P_STRING},
	{"BatchStartTimeout", S_P_UINT16},
	{"BurstBufferParameters", S_P_STRING},
	{"BurstBufferType", S_P_STRING},
	{"CacheGroups", S_P_UINT16},
	{"CheckpointType", S_P_STRING},
	{"ChosLoc", S_P_STRING},
	{"CoreSpecPlugin", S_P_STRING},
	{"CliFilterPlugins", S_P_STRING},
	{"ClusterName", S_P_STRING},
	{"CommunicationParameters", S_P_STRING},
	{"CompleteWait", S_P_UINT16},
	{"ControlAddr", S_P_STRING},
	{"ControlMachine", S_P_STRING},
	{"CpuFreqDef", S_P_STRING},
	{"CpuFreqGovernors", S_P_STRING},
	{"CredType", S_P_STRING},
	{"CryptoType", S_P_STRING},
	{"DebugFlags", S_P_STRING},
	{"DefaultStorageHost", S_P_STRING},
	{"DefaultStorageLoc", S_P_STRING},
	{"DefaultStoragePass", S_P_STRING},
	{"DefaultStoragePort", S_P_UINT32},
	{"DefaultStorageType", S_P_STRING},
	{"DefaultStorageUser", S_P_STRING},
	{"DefCPUPerGPU" , S_P_UINT64},
	{"DefMemPerCPU", S_P_UINT64},
	{"DefMemPerGPU" , S_P_UINT64},
	{"DefMemPerNode", S_P_UINT64},
	{"DependencyParameters", S_P_STRING},
	{"DisableRootJobs", S_P_BOOLEAN},
	{"EioTimeout", S_P_UINT16},
	{"EnforcePartLimits", S_P_STRING},
	{"Epilog", S_P_STRING},
	{"EpilogMsgTime", S_P_UINT32},
	{"EpilogSlurmctld", S_P_STRING},
	{"ExtSensorsType", S_P_STRING},
	{"ExtSensorsFreq", S_P_UINT16},
	{"FairShareDampeningFactor", S_P_UINT16},
	{"FastSchedule", S_P_UINT16},
	{"FederationParameters", S_P_STRING},
	{"FirstJobId", S_P_UINT32},
	{"GetEnvTimeout", S_P_UINT16},
	{"GresTypes", S_P_STRING},
	{"GroupUpdateForce", S_P_UINT16},
	{"GroupUpdateTime", S_P_UINT16},
	{"GpuFreqDef", S_P_STRING},
	{"HealthCheckInterval", S_P_UINT16},
	{"HealthCheckNodeState", S_P_STRING},
	{"HealthCheckProgram", S_P_STRING},
	{"InactiveLimit", S_P_UINT16},
	{"JobAcctGatherType", S_P_STRING},
	{"JobAcctGatherFrequency", S_P_STRING},
	{"JobAcctGatherParams", S_P_STRING},
	{"JobCheckpointDir", S_P_STRING},
	{"JobCompHost", S_P_STRING},
	{"JobCompLoc", S_P_STRING},
	{"JobCompParams", S_P_STRING},
	{"JobCompPass", S_P_STRING},
	{"JobCompPort", S_P_UINT32},
	{"JobCompType", S_P_STRING},
	{"JobContainerType", S_P_STRING},
	{"JobCompUser", S_P_STRING},
	{"JobCredentialPrivateKey", S_P_STRING},
	{"JobCredentialPublicCertificate", S_P_STRING},
	{"JobFileAppend", S_P_UINT16},
	{"JobRequeue", S_P_UINT16},
	{"JobSubmitPlugins", S_P_STRING},
	{"KeepAliveTime", S_P_UINT16},
	{"KillOnBadExit", S_P_UINT16},
	{"KillWait", S_P_UINT16},
	{"LaunchParameters", S_P_STRING},
	{"LaunchType", S_P_STRING},
	{"Layouts", S_P_STRING},
	{"Licenses", S_P_STRING},
	{"LogTimeFormat", S_P_STRING},
	{"MailDomain", S_P_STRING},
	{"MailProg", S_P_STRING},
	{"MaxArraySize", S_P_UINT32},
	{"MaxDBDMsgs", S_P_UINT32},
	{"MaxJobCount", S_P_UINT32},
	{"MaxJobId", S_P_UINT32},
	{"MaxMemPerCPU", S_P_UINT64},
	{"MaxMemPerNode", S_P_UINT64},
	{"MaxStepCount", S_P_UINT32},
	{"MaxTasksPerNode", S_P_UINT16},
	{"MCSParameters", S_P_STRING},
	{"MCSPlugin", S_P_STRING},
	{"MemLimitEnforce", S_P_STRING},
	{"MessageTimeout", S_P_UINT16},
	{"MinJobAge", S_P_UINT32},
	{"MpiDefault", S_P_STRING},
	{"MpiParams", S_P_STRING},
	{"MsgAggregationParams", S_P_STRING},
	{"NodeFeaturesPlugins", S_P_STRING},
	{"OverTimeLimit", S_P_UINT16},
	{"PluginDir", S_P_STRING},
	{"PlugStackConfig", S_P_STRING},
	{"PowerParameters", S_P_STRING},
	{"PowerPlugin", S_P_STRING},
	{"PreemptMode", S_P_STRING},
	{"PreemptType", S_P_STRING},
	{"PreemptExemptTime", S_P_STRING},
	{"PrEpParameters", S_P_STRING},
	{"PrEpPlugins", S_P_STRING},
	{"PriorityDecayHalfLife", S_P_STRING},
	{"PriorityCalcPeriod", S_P_STRING},
	{"PriorityFavorSmall", S_P_BOOLEAN},
	{"PriorityMaxAge", S_P_STRING},
	{"PriorityParameters", S_P_STRING},
	{"PriorityUsageResetPeriod", S_P_STRING},
	{"PriorityType", S_P_STRING},
	{"PriorityFlags", S_P_STRING},
	{"PrioritySiteFactorParameters", S_P_STRING},
	{"PrioritySiteFactorPlugin", S_P_STRING},
	{"PriorityWeightAge", S_P_UINT32},
	{"PriorityWeightAssoc", S_P_UINT32},
	{"PriorityWeightFairshare", S_P_UINT32},
	{"PriorityWeightJobSize", S_P_UINT32},
	{"PriorityWeightPartition", S_P_UINT32},
	{"PriorityWeightQOS", S_P_UINT32},
	{"PriorityWeightTRES", S_P_STRING},
	{"PrivateData", S_P_STRING},
	{"ProctrackType", S_P_STRING},
	{"Prolog", S_P_STRING},
	{"PrologSlurmctld", S_P_STRING},
	{"PrologEpilogTimeout", S_P_UINT16},
	{"PrologFlags", S_P_STRING},
	{"PropagatePrioProcess", S_P_UINT16},
	{"PropagateResourceLimitsExcept", S_P_STRING},
	{"PropagateResourceLimits", S_P_STRING},
	{"RebootProgram", S_P_STRING},
	{"ReconfigFlags", S_P_STRING},
	{"RequeueExit", S_P_STRING},
	{"RequeueExitHold", S_P_STRING},
	{"ResumeFailProgram", S_P_STRING},
	{"ResumeProgram", S_P_STRING},
	{"ResumeRate", S_P_UINT16},
	{"ResumeTimeout", S_P_UINT16},
	{"ResvEpilog", S_P_STRING},
	{"ResvOverRun", S_P_UINT16},
	{"ResvProlog", S_P_STRING},
	{"ReturnToService", S_P_UINT16},
	{"RoutePlugin", S_P_STRING},
	{"SallocDefaultCommand", S_P_STRING},
	{"SbcastParameters", S_P_STRING},
	{"SchedulerAuth", S_P_STRING, _defunct_option},
	{"SchedulerParameters", S_P_STRING},
	{"SchedulerPort", S_P_UINT16},
	{"SchedulerRootFilter", S_P_UINT16},
	{"SchedulerTimeSlice", S_P_UINT16},
	{"SchedulerType", S_P_STRING},
	{"SelectType", S_P_STRING},
	{"SelectTypeParameters", S_P_STRING},
	{"SlurmUser", S_P_STRING},
	{"SlurmdUser", S_P_STRING},
	{"SlurmctldAddr", S_P_STRING},
	{"SlurmctldDebug", S_P_STRING},
	{"SlurmctldLogFile", S_P_STRING},
	{"SlurmctldPidFile", S_P_STRING},
	{"SlurmctldPlugstack", S_P_STRING},
	{"SlurmctldPort", S_P_STRING},
	{"SlurmctldPrimaryOffProg", S_P_STRING},
	{"SlurmctldPrimaryOnProg", S_P_STRING},
	{"SlurmctldSyslogDebug", S_P_STRING},
	{"SlurmctldTimeout", S_P_UINT16},
	{"SlurmctldParameters", S_P_STRING},
	{"SlurmdDebug", S_P_STRING},
	{"SlurmdLogFile", S_P_STRING},
	{"SlurmdParameters", S_P_STRING},
	{"SlurmdPidFile",  S_P_STRING},
	{"SlurmdPort", S_P_UINT32},
	{"SlurmdSpoolDir", S_P_STRING},
	{"SlurmdSyslogDebug", S_P_STRING},
	{"SlurmdTimeout", S_P_UINT16},
	{"SlurmSchedLogFile", S_P_STRING},
	{"SlurmSchedLogLevel", S_P_UINT16},
	{"SrunEpilog", S_P_STRING},
	{"SrunProlog", S_P_STRING},
	{"SrunPortRange", S_P_STRING},
	{"StateSaveLocation", S_P_STRING},
	{"SuspendExcNodes", S_P_STRING},
	{"SuspendExcParts", S_P_STRING},
	{"SuspendProgram", S_P_STRING},
	{"SuspendRate", S_P_UINT16},
	{"SuspendTime", S_P_STRING},
	{"SuspendTimeout", S_P_UINT16},
	{"SwitchType", S_P_STRING},
	{"TaskEpilog", S_P_STRING},
	{"TaskProlog", S_P_STRING},
	{"TaskPlugin", S_P_STRING},
	{"TaskPluginParam", S_P_STRING},
	{"TCPTimeout", S_P_UINT16},
	{"TmpFS", S_P_STRING},
	{"TopologyParam", S_P_STRING},
	{"TopologyPlugin", S_P_STRING},
	{"TrackWCKey", S_P_BOOLEAN},
	{"TreeWidth", S_P_UINT16},
	{"UnkillableStepProgram", S_P_STRING},
	{"UnkillableStepTimeout", S_P_UINT16},
	{"UsePAM", S_P_BOOLEAN},
	{"VSizeFactor", S_P_UINT16},
	{"WaitTime", S_P_UINT16},
	{"X11Parameters", S_P_STRING},

	/* Multi-line record keywords: handler parses each line into a
	 * record, destructor releases it */
	{"DownNodes", S_P_ARRAY, _parse_downnodes, _destroy_downnodes},
	{"FrontendName", S_P_ARRAY, _parse_frontend, destroy_frontend},
	{"NodeName", S_P_ARRAY, _parse_nodename, _destroy_nodename},
	{"NodeSet", S_P_ARRAY, _parse_nodeset, _destroy_nodeset},
	{"PartitionName", S_P_ARRAY, _parse_partitionname,
	 _destroy_partitionname},
	{"SlurmctldHost", S_P_ARRAY, _parse_slurmctld_host,
	 _destroy_slurmctld_host},

	{NULL}
};
435 
/*
 * Verify that every colon-separated element of "path" names an existing
 * directory.  On the first invalid element an error is logged (prefixed
 * with "msg") and false is returned; true means all elements are valid
 * directories.  "path" itself is not modified (a scratch copy is used).
 */
static bool _is_valid_path(char *path, char *msg)
{
	bool valid = true;
	char *copy, *elem, *save = NULL;

	if (!path) {
		error ("is_valid_path: path is NULL!");
		return false;
	}

	/* strtok_r() writes into its argument, so work on a duplicate */
	copy = xstrdup(path);
	for (elem = strtok_r(copy, ":", &save); elem;
	     elem = strtok_r(NULL, ":", &save)) {
		struct stat sb;

		/* Each element must exist and be a directory */
		if (stat(elem, &sb) < 0) {
			error ("%s: %s: %m", msg, elem);
			valid = false;
			break;
		}
		if (!S_ISDIR(sb.st_mode)) {
			error ("%s: %s: Not a directory", msg, elem);
			valid = false;
			break;
		}
	}

	xfree(copy);
	return valid;
}
473 
/*
 * s_p parser handler for keywords that are no longer supported.
 * Logs an error naming the obsolete keyword; the value is ignored and
 * nothing is stored.  Always returns 0 (no record produced).
 */
static int _defunct_option(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover)
{
	error("The option \"%s\" is defunct, see man slurm.conf.", key);
	return 0;
}
481 
482 /* Used to get the general name of the machine, used primarily
483  * for bluegene systems.  Not in general use because some systems
484  * have multiple prefix's such as foo[1-1000],bar[1-1000].
485  */
486 /* Caller must be holding slurm_conf_lock() */
_set_node_prefix(const char * nodenames)487 static void _set_node_prefix(const char *nodenames)
488 {
489 	int i;
490 	char *tmp;
491 
492 	xassert(nodenames != NULL);
493 	for (i = 1; nodenames[i] != '\0'; i++) {
494 		if ((nodenames[i-1] == '[')
495 		   || (nodenames[i-1] <= '9'
496 		       && nodenames[i-1] >= '0'))
497 			break;
498 	}
499 
500 	if (i == 1) {
501 		error("In your Node definition in your slurm.conf you "
502 		      "gave a nodelist '%s' without a prefix.  "
503 		      "Please try something like bg%s.", nodenames, nodenames);
504 	}
505 
506 	xfree(conf_ptr->node_prefix);
507 	if (nodenames[i] == '\0')
508 		conf_ptr->node_prefix = xstrdup(nodenames);
509 	else {
510 		tmp = xmalloc(i + 1);
511 		snprintf(tmp, i, "%s", nodenames);
512 		conf_ptr->node_prefix = tmp;
513 		tmp = NULL;
514 	}
515 	debug3("Prefix is %s %s %d", conf_ptr->node_prefix, nodenames, i);
516 }
517 
/*
 * s_p parser handler for one "FrontendName=" line.  Builds a
 * slurm_conf_frontend_t record and returns it through *dest.
 *
 * A "FrontendName=DEFAULT" line instead updates the file-scope
 * default_frontend_tbl, whose values backfill later front-end lines.
 *
 * Returns 1 when a record was produced, 0 for the DEFAULT case, and
 * -1 on a parse error.
 */
static int _parse_frontend(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover)
{
	s_p_hashtbl_t *tbl, *dflt;
	slurm_conf_frontend_t *n;
	char *node_state = NULL;
	static s_p_options_t _frontend_options[] = {
		{"AllowGroups", S_P_STRING},
		{"AllowUsers", S_P_STRING},
		{"DenyGroups", S_P_STRING},
		{"DenyUsers", S_P_STRING},
		{"FrontendAddr", S_P_STRING},
		{"Port", S_P_UINT16},
		{"Reason", S_P_STRING},
		{"State", S_P_STRING},
		{NULL}
	};

#ifndef HAVE_FRONT_END
	/* Keyword present but build lacks front-end support: flag it */
	log_var(lvl, "Use of FrontendName in slurm.conf without Slurm being configured/built with the --enable-front-end option");
	local_test_config_rc = 1;
#endif

	tbl = s_p_hashtbl_create(_frontend_options);
	s_p_parse_line(tbl, *leftover, leftover);
	/* s_p_dump_values(tbl, _frontend_options); */

	if (xstrcasecmp(value, "DEFAULT") == 0) {
		char *tmp;
		/* FrontendAddr is per-node; rejecting it on the DEFAULT
		 * line avoids giving every front end the same address */
		if (s_p_get_string(&tmp, "FrontendAddr", tbl)) {
			error("FrontendAddr not allowed with "
			      "FrontendName=DEFAULT");
			xfree(tmp);
			s_p_hashtbl_destroy(tbl);
			return -1;
		}

		/* Fold any earlier DEFAULT values into this table, then
		 * make it the new set of defaults */
		if (default_frontend_tbl != NULL) {
			s_p_hashtbl_merge(tbl, default_frontend_tbl);
			s_p_hashtbl_destroy(default_frontend_tbl);
		}
		default_frontend_tbl = tbl;

		return 0;
	} else {
		n = xmalloc(sizeof(slurm_conf_frontend_t));
		dflt = default_frontend_tbl;

		n->frontends = xstrdup(value);

		(void) s_p_get_string(&n->allow_groups, "AllowGroups", tbl);
		(void) s_p_get_string(&n->allow_users,  "AllowUsers", tbl);
		(void) s_p_get_string(&n->deny_groups,  "DenyGroups", tbl);
		(void) s_p_get_string(&n->deny_users,   "DenyUsers", tbl);
		/* Allow and Deny lists are mutually exclusive */
		if (n->allow_groups && n->deny_groups) {
			log_var(lvl, "FrontEnd options AllowGroups and DenyGroups are incompatible");
			local_test_config_rc = 1;
		}
		if (n->allow_users && n->deny_users) {
			log_var(lvl, "FrontEnd options AllowUsers and DenyUsers are incompatible");
			local_test_config_rc = 1;
		}

		/* Fall back to the front-end name itself as its address */
		if (!s_p_get_string(&n->addresses, "FrontendAddr", tbl))
			n->addresses = xstrdup(n->frontends);

		if (!s_p_get_uint16(&n->port, "Port", tbl) &&
		    !s_p_get_uint16(&n->port, "Port", dflt)) {
			/* This gets resolved in slurm_conf_get_port()
			 * and slurm_conf_get_addr(). For now just
			 * leave with a value of zero */
			n->port = 0;
		}

		if (!s_p_get_string(&n->reason, "Reason", tbl))
			s_p_get_string(&n->reason, "Reason", dflt);

		/* Explicit State on this line wins, then the DEFAULT
		 * table, then NODE_STATE_UNKNOWN */
		if (!s_p_get_string(&node_state, "State", tbl) &&
		    !s_p_get_string(&node_state, "State", dflt)) {
			n->node_state = NODE_STATE_UNKNOWN;
		} else {
			n->node_state = state_str2int(node_state,
						      (char *) value);
			if (n->node_state == NO_VAL16)
				n->node_state = NODE_STATE_UNKNOWN;
			xfree(node_state);
		}

		*dest = (void *)n;

		s_p_hashtbl_destroy(tbl);
		return 1;
	}

	/* should not get here */
}
615 
/*
 * s_p parser handler for one "NodeName=" line.  Builds a
 * slurm_conf_node_t record and returns it through *dest, reconciling
 * CPUs/Boards/Sockets/SocketsPerBoard/CoresPerSocket/ThreadsPerCore
 * into a consistent topology.
 *
 * A "NodeName=DEFAULT" line instead updates the file-scope
 * default_nodename_tbl, whose values backfill later node lines.
 *
 * Returns 1 when a record was produced, 0 for the DEFAULT case, and
 * -1 on a parse error.
 */
static int _parse_nodename(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover)
{
	s_p_hashtbl_t *tbl, *dflt;
	slurm_conf_node_t *n;
	static s_p_options_t _nodename_options[] = {
		{"BcastAddr", S_P_STRING},
		{"Boards", S_P_UINT16},
		{"CoreSpecCount", S_P_UINT16},
		{"CoresPerSocket", S_P_UINT16},
		{"CPUs", S_P_UINT16},
		{"CPUSpecList", S_P_STRING},
		{"CpuBind", S_P_STRING},
		{"Feature", S_P_STRING},
		{"Features", S_P_STRING},
		{"Gres", S_P_STRING},
		{"MemSpecLimit", S_P_UINT64},
		{"NodeAddr", S_P_STRING},
		{"NodeHostname", S_P_STRING},
		{"Port", S_P_STRING},
		{"Procs", S_P_UINT16},
		{"RealMemory", S_P_UINT64},
		{"Reason", S_P_STRING},
		{"Sockets", S_P_UINT16},
		{"SocketsPerBoard", S_P_UINT16},
		{"State", S_P_STRING},
		{"ThreadsPerCore", S_P_UINT16},
		{"TmpDisk", S_P_UINT32},
		{"TRESWeights", S_P_STRING},
		{"Weight", S_P_UINT32},
		{NULL}
	};

	tbl = s_p_hashtbl_create(_nodename_options);
	s_p_parse_line(tbl, *leftover, leftover);
	/* s_p_dump_values(tbl, _nodename_options); */

	if (xstrcasecmp(value, "DEFAULT") == 0) {
		char *tmp;
		/* Hostname/address options are per-node and must not
		 * appear on the DEFAULT line */
		if (s_p_get_string(&tmp, "NodeHostname", tbl)) {
			error("NodeHostname not allowed with "
			      "NodeName=DEFAULT");
			xfree(tmp);
			s_p_hashtbl_destroy(tbl);
			return -1;
		}

		if (s_p_get_string(&tmp, "BcastAddr", tbl)) {
			error("BcastAddr not allowed with NodeName=DEFAULT");
			xfree(tmp);
			s_p_hashtbl_destroy(tbl);
			return -1;
		}

		if (s_p_get_string(&tmp, "NodeAddr", tbl)) {
			error("NodeAddr not allowed with NodeName=DEFAULT");
			xfree(tmp);
			s_p_hashtbl_destroy(tbl);
			return -1;
		}

		/* Fold any earlier DEFAULT values into this table, then
		 * make it the new set of defaults */
		if (default_nodename_tbl != NULL) {
			s_p_hashtbl_merge(tbl, default_nodename_tbl);
			s_p_hashtbl_destroy(default_nodename_tbl);
		}
		default_nodename_tbl = tbl;

		return 0;
	} else {
		/* Track which topology values were defaulted so we can
		 * infer the missing ones afterwards */
		bool no_cpus    = false;
		bool no_boards  = false;
		bool no_sockets = false;
		bool no_sockets_per_board = false;
		uint16_t sockets_per_board = 0;
		char *cpu_bind = NULL;

		n = xmalloc(sizeof(slurm_conf_node_t));
		dflt = default_nodename_tbl;

		n->nodenames = xstrdup(value);
		if ((slurmdb_setup_cluster_name_dims() > 1)
		    && conf_ptr->node_prefix == NULL)
			_set_node_prefix(n->nodenames);

		/* Hostname defaults to the node name, address to the
		 * hostname */
		if (!s_p_get_string(&n->hostnames, "NodeHostname", tbl))
			n->hostnames = xstrdup(n->nodenames);
		if (!s_p_get_string(&n->addresses, "NodeAddr", tbl))
			n->addresses = xstrdup(n->hostnames);
		s_p_get_string(&n->bcast_addresses, "BcastAddr", tbl);

		if (!s_p_get_uint16(&n->boards, "Boards", tbl)
		    && !s_p_get_uint16(&n->boards, "Boards", dflt)) {
			n->boards = 1;
			no_boards = true;
		}

		if (s_p_get_string(&cpu_bind, "CpuBind", tbl) ||
		    s_p_get_string(&cpu_bind, "CpuBind", dflt)) {
			if (xlate_cpu_bind_str(cpu_bind, &n->cpu_bind) !=
			    SLURM_SUCCESS) {
				error("NodeNames=%s CpuBind=\'%s\' is invalid, ignored",
				      n->nodenames, cpu_bind);
				n->cpu_bind = 0;
			}
			xfree(cpu_bind);
		}

		if (!s_p_get_uint16(&n->core_spec_cnt, "CoreSpecCount", tbl)
		    && !s_p_get_uint16(&n->core_spec_cnt,
				       "CoreSpecCount", dflt))
			n->core_spec_cnt = 0;


		if (!s_p_get_uint16(&n->cores, "CoresPerSocket", tbl)
		    && !s_p_get_uint16(&n->cores, "CoresPerSocket", dflt)) {
			n->cores = 1;
		}

		if (!s_p_get_string(&n->cpu_spec_list, "CPUSpecList", tbl))
			s_p_get_string(&n->cpu_spec_list, "CPUSpecList", dflt);

		/* "Feature" and "Features" are accepted as synonyms */
		if (!s_p_get_string(&n->feature, "Feature",  tbl) &&
		    !s_p_get_string(&n->feature, "Features", tbl) &&
		    !s_p_get_string(&n->feature, "Feature",  dflt))
			s_p_get_string(&n->feature, "Features", dflt);

		if (!s_p_get_string(&n->gres, "Gres", tbl))
			s_p_get_string(&n->gres, "Gres", dflt);

		if (!s_p_get_uint64(&n->mem_spec_limit, "MemSpecLimit", tbl)
		    && !s_p_get_uint64(&n->mem_spec_limit,
				       "MemSpecLimit", dflt))
			n->mem_spec_limit = 0;

		if (!s_p_get_string(&n->port_str, "Port", tbl) &&
		    !s_p_get_string(&n->port_str, "Port", dflt)) {
			/* This gets resolved in slurm_conf_get_port()
			 * and slurm_conf_get_addr(). For now just
			 * leave with a value of NULL */
		}

		/* "Procs" is accepted as a synonym for "CPUs" */
		if (!s_p_get_uint16(&n->cpus, "CPUs",  tbl)  &&
		    !s_p_get_uint16(&n->cpus, "CPUs",  dflt) &&
		    !s_p_get_uint16(&n->cpus, "Procs", tbl)  &&
		    !s_p_get_uint16(&n->cpus, "Procs", dflt)) {
			n->cpus = 1;
			no_cpus = true;
		}

		if (!s_p_get_uint64(&n->real_memory, "RealMemory", tbl)
		    && !s_p_get_uint64(&n->real_memory, "RealMemory", dflt))
			n->real_memory = 1;

		if (!s_p_get_string(&n->reason, "Reason", tbl))
			s_p_get_string(&n->reason, "Reason", dflt);

		if (!s_p_get_uint16(&n->sockets, "Sockets", tbl)
		    && !s_p_get_uint16(&n->sockets, "Sockets", dflt)) {
			n->sockets = 1;
			no_sockets = true;
		}

		if (!s_p_get_uint16(&sockets_per_board, "SocketsPerBoard", tbl)
		    && !s_p_get_uint16(&sockets_per_board, "SocketsPerBoard",
				       dflt)) {
			sockets_per_board = 1;
			no_sockets_per_board = true;
		}

		if (!s_p_get_string(&n->state, "State", tbl)
		    && !s_p_get_string(&n->state, "State", dflt))
			n->state = NULL;

		if (!s_p_get_uint16(&n->threads, "ThreadsPerCore", tbl)
		    && !s_p_get_uint16(&n->threads, "ThreadsPerCore", dflt)) {
			n->threads = 1;
		}

		if (!s_p_get_uint32(&n->tmp_disk, "TmpDisk", tbl)
		    && !s_p_get_uint32(&n->tmp_disk, "TmpDisk", dflt))
			n->tmp_disk = 0;

		if (!s_p_get_string(&n->tres_weights_str, "TRESWeights", tbl) &&
		    !s_p_get_string(&n->tres_weights_str, "TRESWeights", dflt))
			xfree(n->tres_weights_str);

		/* INFINITE is reserved; clamp an explicit Weight=INFINITE
		 * down by one */
		if (!s_p_get_uint32(&n->weight, "Weight", tbl)
		    && !s_p_get_uint32(&n->weight, "Weight", dflt))
			n->weight = 1;
		else if (n->weight == INFINITE)
			n->weight -= 1;

		s_p_hashtbl_destroy(tbl);

		/* ---- Sanity checks and topology reconciliation ---- */

		if (n->cores == 0) {	/* make sure cores is non-zero */
			error("NodeNames=%s CoresPerSocket=0 is invalid, "
			      "reset to 1", n->nodenames);
			n->cores = 1;
		}
		if (n->threads == 0) {	/* make sure threads is non-zero */
			error("NodeNames=%s ThreadsPerCore=0 is invalid, "
			      "reset to 1", n->nodenames);
			n->threads = 1;
		}

		if (!no_sockets_per_board && sockets_per_board==0) {
			/* make sure sockets_per_boards is non-zero */
			error("NodeNames=%s SocketsPerBoards=0 is invalid, "
			      "reset to 1", n->nodenames);
			sockets_per_board = 1;
		}

		if (no_boards) {
			/* This case is exactly like if was without boards,
			 * Except SocketsPerBoard=# can be used,
			 * But it can't be used with Sockets=# */
			n->boards = 1;
			if (!no_sockets_per_board) {
				if (!no_sockets)
					error("NodeNames=%s Sockets=# and "
					      "SocketsPerBoard=# is invalid"
					      ", using SocketsPerBoard",
					      n->nodenames);
				n->sockets = sockets_per_board;
			} else if (!no_cpus && no_sockets) {
				/* infer missing Sockets= */
				n->sockets = n->cpus / (n->cores * n->threads);
			}

			if (n->sockets == 0) { /* make sure sockets != 0 */
				error("NodeNames=%s Sockets=0 is invalid, "
				      "reset to 1", n->nodenames);
				n->sockets = 1;
			}
		} else {
			/* In this case Boards=# is used.
			 * CPUs=# or Procs=# are ignored.
			 */
			if (n->boards == 0) {
				/* make sure boards is non-zero */
				error("NodeNames=%s Boards=0 is "
				      "invalid, reset to 1",
				      n->nodenames);
				n->boards = 1;
			}

			if (!no_sockets_per_board) {
				if (!no_sockets)
					error("NodeNames=%s Sockets=# and "
					      "SocketsPerBoard=# is invalid, "
					      "using SocketsPerBoard",
					      n->nodenames);

				n->sockets = n->boards * sockets_per_board;
			} else if (!no_sockets) {
				error("NodeNames=%s Sockets=# with Boards=# is"
				      " not recommended, assume "
				      "SocketsPerBoard was meant",
				      n->nodenames);
				if (n->sockets == 0) {
					/* make sure sockets is non-zero */
					error("NodeNames=%s Sockets=0 is "
					      "invalid, reset to 1",
					      n->nodenames);
					n->sockets = 1;
				}
				n->sockets = n->boards * n->sockets;
			} else {
				n->sockets = n->boards;
			}
		}

		if (no_cpus) {		/* infer missing CPUs= */
			n->cpus = n->sockets * n->cores * n->threads;
		}

		/* Node boards are factored into sockets */
		/* CPUs must equal sockets, cores, or threads granularity */
		if ((n->cpus != n->sockets) &&
		    (n->cpus != n->sockets * n->cores) &&
		    (n->cpus != n->sockets * n->cores * n->threads)) {
			error("NodeNames=%s CPUs=%d match no Sockets, Sockets*CoresPerSocket or Sockets*CoresPerSocket*ThreadsPerCore. Resetting CPUs.",
			      n->nodenames, n->cpus);
			n->cpus = n->sockets * n->cores * n->threads;
		}

		/* Specialized cores/memory must leave something usable */
		if (n->core_spec_cnt >= (n->sockets * n->cores)) {
			error("NodeNames=%s CoreSpecCount=%u is invalid, "
			      "reset to 1", n->nodenames, n->core_spec_cnt);
			n->core_spec_cnt = 1;
		}

		if ((n->core_spec_cnt > 0) && n->cpu_spec_list) {
			error("NodeNames=%s CoreSpecCount=%u is invalid "
			      "with CPUSpecList, reset to 0",
			      n->nodenames, n->core_spec_cnt);
			n->core_spec_cnt = 0;
		}

		if (n->mem_spec_limit >= n->real_memory) {
			error("NodeNames=%s MemSpecLimit=%"
			      ""PRIu64" is invalid, reset to 0",
			      n->nodenames, n->mem_spec_limit);
			n->mem_spec_limit = 0;
		}

		*dest = (void *)n;

		return 1;
	}

	/* should not get here */
}
929 
930 /* Destroy a front_end record built by slurm_conf_frontend_array() */
destroy_frontend(void * ptr)931 extern void destroy_frontend(void *ptr)
932 {
933 	slurm_conf_frontend_t *n = (slurm_conf_frontend_t *) ptr;
934 	xfree(n->addresses);
935 	xfree(n->allow_groups);
936 	xfree(n->allow_users);
937 	xfree(n->deny_groups);
938 	xfree(n->deny_users);
939 	xfree(n->frontends);
940 	xfree(n->reason);
941 	xfree(ptr);
942 }
943 
944 /*
945  * _list_find_frontend - find an entry in the front_end list, see list.h for
946  *	documentation
947  * IN key - is frontend name
948  * RET 1 if found, 0 otherwise
949  */
950 #ifdef HAVE_FRONT_END
_list_find_frontend(void * front_end_entry,void * key)951 static int _list_find_frontend(void *front_end_entry, void *key)
952 {
953 	slurm_conf_frontend_t *front_end_ptr =
954 		(slurm_conf_frontend_t *) front_end_entry;
955 
956 	if (xstrcmp(front_end_ptr->frontends, (char *) key) == 0)
957 		return 1;
958 	return 0;
959 }
960 #endif
961 
_destroy_nodename(void * ptr)962 static void _destroy_nodename(void *ptr)
963 {
964 	slurm_conf_node_t *n = (slurm_conf_node_t *)ptr;
965 
966 	xfree(n->addresses);
967 	xfree(n->cpu_spec_list);
968 	xfree(n->feature);
969 	xfree(n->hostnames);
970 	xfree(n->gres);
971 	xfree(n->nodenames);
972 	xfree(n->port_str);
973 	xfree(n->reason);
974 	xfree(n->state);
975 	xfree(n->tres_weights_str);
976 	xfree(ptr);
977 }
978 
/* _parse_srun_ports()
 *
 * Parse the srun port range specified like min-max.
 *
 * Returns an xmalloc'ed two element array {min, max}, or NULL when the
 * string is not of the form "min-max" with max strictly greater than min.
 */
static uint16_t *
_parse_srun_ports(const char *str)
{
	uint16_t *range = NULL;
	char *work = xstrdup(str);
	char *sep = strchr(work, '-');

	/* A port range must contain a dash separator */
	if (sep == NULL) {
		xfree(work);
		return NULL;
	}
	*sep = '\0';

	range = xcalloc(2, sizeof(uint16_t));

	/* Both halves must parse as uint16 and the range must ascend */
	if (parse_uint16(work, &range[0]) ||
	    parse_uint16(sep + 1, &range[1]) ||
	    (range[1] <= range[0])) {
		xfree(range);
		range = NULL;
	}

	xfree(work);
	return range;
}
1023 
slurm_conf_frontend_array(slurm_conf_frontend_t ** ptr_array[])1024 int slurm_conf_frontend_array(slurm_conf_frontend_t **ptr_array[])
1025 {
1026 	int count = 0;
1027 	slurm_conf_frontend_t **ptr;
1028 
1029 	if (s_p_get_array((void ***)&ptr, &count, "FrontendName",
1030 			  conf_hashtbl)) {
1031 		*ptr_array = ptr;
1032 		return count;
1033 	} else {
1034 #ifdef HAVE_FRONT_END
1035 		/* No FrontendName in slurm.conf. Take the NodeAddr and
1036 		 * NodeHostName from the first node's record and use that to
1037 		 * build an equivalent structure to that constructed when
1038 		 * FrontendName is configured. This is intended for backward
1039 		 * compatibility with Slurm version 2.2. */
1040 		static slurm_conf_frontend_t local_front_end;
1041 		static slurm_conf_frontend_t *local_front_end_array[2] =
1042 			{NULL, NULL};
1043 		static char addresses[1024], hostnames[1024];
1044 
1045 		if (local_front_end_array[0] == NULL) {
1046 			slurm_conf_node_t **node_ptr;
1047 			int node_count = 0;
1048 			if (!s_p_get_array((void ***)&node_ptr, &node_count,
1049 					   "NodeName", conf_hashtbl) ||
1050 			    (node_count == 0)) {
1051 				log_var(lvl, "No front end nodes configured");
1052 				local_test_config_rc = 1;
1053 			}
1054 			strlcpy(addresses, node_ptr[0]->addresses,
1055 				sizeof(addresses));
1056 			strlcpy(hostnames, node_ptr[0]->hostnames,
1057 				sizeof(hostnames));
1058 			local_front_end.addresses = addresses;
1059 			local_front_end.frontends = hostnames;
1060 			if (node_ptr[0]->port_str) {
1061 				local_front_end.port = atoi(node_ptr[0]->
1062 							    port_str);
1063 			}
1064 			local_front_end.reason = NULL;
1065 			local_front_end.node_state = NODE_STATE_UNKNOWN;
1066 			local_front_end_array[0] = &local_front_end;
1067 		}
1068 		*ptr_array = local_front_end_array;
1069 		return 1;
1070 #else
1071 		*ptr_array = NULL;
1072 		return 0;
1073 #endif
1074 	}
1075 }
1076 
1077 
slurm_conf_nodename_array(slurm_conf_node_t ** ptr_array[])1078 int slurm_conf_nodename_array(slurm_conf_node_t **ptr_array[])
1079 {
1080 	int count = 0;
1081 	slurm_conf_node_t **ptr;
1082 
1083 	if (s_p_get_array((void ***)&ptr, &count, "NodeName", conf_hashtbl)) {
1084 		*ptr_array = ptr;
1085 		return count;
1086 	} else {
1087 		*ptr_array = NULL;
1088 		return 0;
1089 	}
1090 }
1091 
1092 /* Copy list of job_defaults_t elements */
job_defaults_copy(List in_list)1093 extern List job_defaults_copy(List in_list)
1094 {
1095 	List out_list = NULL;
1096 	job_defaults_t *in_default, *out_default;
1097 	ListIterator iter;
1098 
1099 	if (!in_list)
1100 		return out_list;
1101 
1102 	out_list = list_create(xfree_ptr);
1103 	iter = list_iterator_create(in_list);
1104 	while ((in_default = list_next(iter))) {
1105 		out_default = xmalloc(sizeof(job_defaults_t));
1106 		memcpy(out_default, in_default, sizeof(job_defaults_t));
1107 		list_append(out_list, out_default);
1108 	}
1109 	list_iterator_destroy(iter);
1110 
1111 	return out_list;
1112 }
1113 
_job_def_name(uint16_t type)1114 static char *_job_def_name(uint16_t type)
1115 {
1116 	static char name[32];
1117 
1118 	switch (type) {
1119 	case JOB_DEF_CPU_PER_GPU:
1120 		return "DefCpuPerGPU";
1121 	case JOB_DEF_MEM_PER_GPU:
1122 		return "DefMemPerGPU";
1123 	}
1124 	snprintf(name, sizeof(name), "Unknown(%u)", type);
1125 	return name;
1126 }
1127 
_job_def_type(char * type)1128 static uint16_t _job_def_type(char *type)
1129 {
1130 	if (!xstrcasecmp(type, "DefCpuPerGPU"))
1131 		return JOB_DEF_CPU_PER_GPU;
1132 	if (!xstrcasecmp(type, "DefMemPerGPU"))
1133 		return JOB_DEF_MEM_PER_GPU;
1134 	return NO_VAL16;
1135 }
1136 
1137 /*
1138  * Translate string of job_defaults_t elements into a List.
1139  * in_str IN - comma separated key=value pairs
1140  * out_list OUT - equivalent list of key=value pairs
1141  * Returns SLURM_SUCCESS or an error code
1142  */
job_defaults_list(char * in_str,List * out_list)1143 extern int job_defaults_list(char *in_str, List *out_list)
1144 {
1145 	int rc = SLURM_SUCCESS;
1146 	List tmp_list;
1147 	char *end_ptr = NULL, *tmp_str, *save_ptr = NULL, *sep, *tok;
1148 	uint16_t type;
1149 	long long int value;
1150 	job_defaults_t *out_default;
1151 
1152 	*out_list = NULL;
1153 	if (!in_str || (in_str[0] == '\0'))
1154 		return rc;
1155 
1156 	tmp_list = list_create(xfree_ptr);
1157 	tmp_str = xstrdup(in_str);
1158 	tok = strtok_r(tmp_str, ",", &save_ptr);
1159 	while (tok) {
1160 		sep = strchr(tok, '=');
1161 		if (!sep) {
1162 			rc = EINVAL;
1163 			break;
1164 		}
1165 		sep[0] = '\0';
1166 		sep++;
1167 		type = _job_def_type(tok);
1168 		if (type == NO_VAL16) {
1169 			rc = EINVAL;
1170 			break;
1171 		}
1172 		value = strtoll(sep, &end_ptr, 10);
1173 		if (!end_ptr || (end_ptr[0] != '\0') ||
1174 		    (value < 0) || (value == LLONG_MAX)) {
1175 			rc = EINVAL;
1176 			break;
1177 		}
1178 		out_default = xmalloc(sizeof(job_defaults_t));
1179 		out_default->type = type;
1180 		out_default->value = (uint64_t) value;
1181 		list_append(tmp_list, out_default);
1182 		tok = strtok_r(NULL, ",", &save_ptr);
1183 	}
1184 	xfree(tmp_str);
1185 	if (rc != SLURM_SUCCESS)
1186 		FREE_NULL_LIST(tmp_list);
1187 	else
1188 		*out_list = tmp_list;
1189 	return rc;
1190 }
1191 
1192 /*
1193  * Translate list of job_defaults_t elements into a string.
1194  * Return value must be released using xfree()
1195  */
job_defaults_str(List in_list)1196 extern char *job_defaults_str(List in_list)
1197 {
1198 	job_defaults_t *in_default;
1199 	ListIterator iter;
1200 	char *out_str = NULL, *sep = "";
1201 
1202 	if (!in_list)
1203 		return out_str;
1204 
1205 	iter = list_iterator_create(in_list);
1206 	while ((in_default = list_next(iter))) {
1207 		xstrfmtcat(out_str, "%s%s=%"PRIu64, sep,
1208 			   _job_def_name(in_default->type), in_default->value);
1209 		sep = ",";
1210 	}
1211 	list_iterator_destroy(iter);
1212 
1213 	return out_str;
1214 
1215 }
1216 
1217 /* Pack a job_defaults_t element. Used by slurm_pack_list() */
job_defaults_pack(void * in,uint16_t protocol_version,Buf buffer)1218 extern void job_defaults_pack(void *in, uint16_t protocol_version, Buf buffer)
1219 {
1220 	job_defaults_t *object = (job_defaults_t *)in;
1221 
1222 	if (!object) {
1223 		pack16(0, buffer);
1224 		pack64(0, buffer);
1225 		return;
1226 	}
1227 
1228 	pack16(object->type, buffer);
1229 	pack64(object->value, buffer);
1230 }
1231 
/* Unpack a job_defaults_t element. Used by slurm_unpack_list() */
extern int job_defaults_unpack(void **out, uint16_t protocol_version,
			       Buf buffer)
{
	/* On success, ownership of the new record passes to the caller */
	job_defaults_t *object = xmalloc(sizeof(job_defaults_t));

	/* safe_unpack* macros jump to unpack_error on a short buffer */
	safe_unpack16(&object->type, buffer);
	safe_unpack64(&object->value, buffer);
	*out = object;
	return SLURM_SUCCESS;

unpack_error:
	/* Partial unpack: release the record and report nothing produced */
	xfree(object);
	*out = NULL;
	return SLURM_ERROR;
}
1248 
_parse_partitionname(void ** dest,slurm_parser_enum_t type,const char * key,const char * value,const char * line,char ** leftover)1249 static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
1250 			       const char *key, const char *value,
1251 			       const char *line, char **leftover)
1252 {
1253 	s_p_hashtbl_t *tbl, *dflt;
1254 	uint64_t def_cpu_per_gpu = 0, def_mem_per_gpu = 0;
1255 	job_defaults_t *job_defaults;
1256 	char *cpu_bind = NULL, *tmp = NULL;
1257 	uint16_t tmp_16 = 0;
1258 	static s_p_options_t _partition_options[] = {
1259 		{"AllocNodes", S_P_STRING},
1260 		{"AllowAccounts",S_P_STRING},
1261 		{"AllowGroups", S_P_STRING},
1262 		{"AllowQos", S_P_STRING},
1263 		{"Alternate", S_P_STRING},
1264 		{"CpuBind", S_P_STRING},
1265 		{"DefCPUPerGPU" , S_P_UINT64},
1266 		{"DefMemPerCPU", S_P_UINT64},
1267 		{"DefMemPerGPU" , S_P_UINT64},
1268 		{"DefMemPerNode", S_P_UINT64},
1269 		{"Default", S_P_BOOLEAN}, /* YES or NO */
1270 		{"DefaultTime", S_P_STRING},
1271 		{"DenyAccounts", S_P_STRING},
1272 		{"DenyQos", S_P_STRING},
1273 		{"DisableRootJobs", S_P_BOOLEAN}, /* YES or NO */
1274 		{"ExclusiveUser", S_P_BOOLEAN}, /* YES or NO */
1275 		{"GraceTime", S_P_UINT32},
1276 		{"Hidden", S_P_BOOLEAN}, /* YES or NO */
1277 		{"LLN", S_P_BOOLEAN}, /* YES or NO */
1278 		{"MaxCPUsPerNode", S_P_UINT32},
1279 		{"MaxMemPerCPU", S_P_UINT64},
1280 		{"MaxMemPerNode", S_P_UINT64},
1281 		{"MaxTime", S_P_STRING},
1282 		{"MaxNodes", S_P_UINT32}, /* INFINITE or a number */
1283 		{"MinNodes", S_P_UINT32},
1284 		{"Nodes", S_P_STRING},
1285 		{"OverSubscribe", S_P_STRING}, /* YES, NO, or FORCE */
1286 		{"OverTimeLimit", S_P_STRING},
1287 		{"PreemptMode", S_P_STRING},
1288 		{"Priority", S_P_UINT16},
1289 		{"PriorityJobFactor", S_P_UINT16},
1290 		{"PriorityTier", S_P_UINT16},
1291 		{"QOS", S_P_STRING},
1292 		{"RootOnly", S_P_BOOLEAN}, /* YES or NO */
1293 		{"ReqResv", S_P_BOOLEAN}, /* YES or NO */
1294 		{"SelectTypeParameters", S_P_STRING},
1295 		{"Shared", S_P_STRING}, /* YES, NO, or FORCE */
1296 		{"State", S_P_STRING}, /* UP, DOWN, INACTIVE or DRAIN */
1297 		{"TRESBillingWeights", S_P_STRING},
1298 		{NULL}
1299 	};
1300 
1301 
1302 	tbl = s_p_hashtbl_create(_partition_options);
1303 	s_p_parse_line(tbl, *leftover, leftover);
1304 	/* s_p_dump_values(tbl, _partition_options); */
1305 
1306 	if (xstrcasecmp(value, "DEFAULT") == 0) {
1307 		if (default_partition_tbl != NULL) {
1308 			s_p_hashtbl_merge(tbl, default_partition_tbl);
1309 			s_p_hashtbl_destroy(default_partition_tbl);
1310 		}
1311 		default_partition_tbl = tbl;
1312 
1313 		return 0;
1314 	} else {
1315 		slurm_conf_partition_t *p = xmalloc(sizeof(*p));
1316 		dflt = default_partition_tbl;
1317 
1318 		p->name = xstrdup(value);
1319 
1320 		if (!s_p_get_string(&p->allow_accounts, "AllowAccounts",tbl))
1321 			s_p_get_string(&p->allow_accounts,
1322 				       "AllowAccounts", dflt);
1323 		/* lower case account names */
1324 		if (p->allow_accounts)
1325 			xstrtolower(p->allow_accounts);
1326 		if (p->allow_accounts &&
1327 		    (xstrcasecmp(p->allow_accounts, "ALL") == 0))
1328 			xfree(p->allow_accounts);
1329 
1330 		if (!s_p_get_string(&p->allow_groups, "AllowGroups", tbl))
1331 			s_p_get_string(&p->allow_groups, "AllowGroups", dflt);
1332 		if (p->allow_groups &&
1333 		    (xstrcasecmp(p->allow_groups, "ALL") == 0))
1334 			xfree(p->allow_groups);
1335 
1336 		if (!s_p_get_string(&p->allow_qos, "AllowQos", tbl))
1337 			s_p_get_string(&p->allow_qos, "AllowQos", dflt);
1338 		/* lower case qos names */
1339 		if (p->allow_qos)
1340 			xstrtolower(p->allow_qos);
1341 		if (p->allow_qos && (xstrcasecmp(p->allow_qos, "ALL") == 0))
1342 			xfree(p->allow_qos);
1343 
1344 		if (!s_p_get_string(&p->deny_accounts, "DenyAccounts", tbl))
1345 			s_p_get_string(&p->deny_accounts,
1346 				       "DenyAccounts", dflt);
1347 		if (p->allow_accounts && p->deny_accounts) {
1348 			error("Both AllowAccounts and DenyAccounts are "
1349 			      "defined, DenyAccounts will be ignored");
1350 		}
1351 		/* lower case account names */
1352 		else if(p->deny_accounts)
1353 			xstrtolower(p->deny_accounts);
1354 
1355 		if (!s_p_get_string(&p->deny_qos, "DenyQos", tbl))
1356 			s_p_get_string(&p->deny_qos, "DenyQos", dflt);
1357 		if (p->allow_qos && p->deny_qos) {
1358 			error("Both AllowQos and DenyQos are defined, "
1359 			      "DenyQos will be ignored");
1360 		}
1361 		/* lower case qos names */
1362 		else if(p->deny_qos)
1363 			xstrtolower(p->deny_qos);
1364 
1365 		if (!s_p_get_string(&p->allow_alloc_nodes,
1366 				    "AllocNodes", tbl)) {
1367 			s_p_get_string(&p->allow_alloc_nodes, "AllocNodes",
1368 				       dflt);
1369 			if (p->allow_alloc_nodes &&
1370 			    (xstrcasecmp(p->allow_alloc_nodes, "ALL") == 0))
1371 				xfree(p->allow_alloc_nodes);
1372 		}
1373 
1374 		if (!s_p_get_string(&p->alternate, "Alternate", tbl))
1375 			s_p_get_string(&p->alternate, "Alternate", dflt);
1376 
1377 		if (s_p_get_string(&cpu_bind, "CpuBind", tbl) ||
1378 		    s_p_get_string(&cpu_bind, "CpuBind", dflt)) {
1379 			if (xlate_cpu_bind_str(cpu_bind, &p->cpu_bind) !=
1380 			    SLURM_SUCCESS) {
1381 				error("Partition=%s CpuBind=\'%s\' is invalid, ignored",
1382 				      p->name, cpu_bind);
1383 				p->cpu_bind = 0;
1384 			}
1385 			xfree(cpu_bind);
1386 		}
1387 
1388 		if (!s_p_get_string(&p->billing_weights_str,
1389 				    "TRESBillingWeights", tbl) &&
1390 		    !s_p_get_string(&p->billing_weights_str,
1391 				    "TRESBillingWeights", dflt))
1392 			xfree(p->billing_weights_str);
1393 
1394 		if (!s_p_get_boolean(&p->default_flag, "Default", tbl)
1395 		    && !s_p_get_boolean(&p->default_flag, "Default", dflt))
1396 			p->default_flag = false;
1397 
1398 		if (!s_p_get_uint32(&p->max_cpus_per_node, "MaxCPUsPerNode",
1399 				    tbl) &&
1400 		    !s_p_get_uint32(&p->max_cpus_per_node, "MaxCPUsPerNode",
1401 				    dflt))
1402 			p->max_cpus_per_node = INFINITE;
1403 
1404 
1405 		if (s_p_get_uint64(&def_cpu_per_gpu, "DefCPUPerGPU", tbl) ||
1406 		    s_p_get_uint64(&def_cpu_per_gpu, "DefCPUPerGPU", dflt)) {
1407 			job_defaults = xmalloc(sizeof(job_defaults_t));
1408 			job_defaults->type  = JOB_DEF_CPU_PER_GPU;
1409 			job_defaults->value = def_cpu_per_gpu;
1410 			if (!p->job_defaults_list) {
1411 				p->job_defaults_list = list_create(xfree_ptr);
1412 			}
1413 			list_append(p->job_defaults_list, job_defaults);
1414 		}
1415 		if (s_p_get_uint64(&def_mem_per_gpu, "DefMemPerGPU", tbl) ||
1416 		    s_p_get_uint64(&def_mem_per_gpu, "DefMemPerGPU", dflt)) {
1417 			job_defaults = xmalloc(sizeof(job_defaults_t));
1418 			job_defaults->type  = JOB_DEF_MEM_PER_GPU;
1419 			job_defaults->value = def_mem_per_gpu;
1420 			if (!p->job_defaults_list) {
1421 				p->job_defaults_list = list_create(xfree_ptr);
1422 			}
1423 			list_append(p->job_defaults_list, job_defaults);
1424 		}
1425 
1426 		if (!s_p_get_uint64(&p->def_mem_per_cpu, "DefMemPerNode",
1427 				    tbl) &&
1428 		    !s_p_get_uint64(&p->def_mem_per_cpu, "DefMemPerNode",
1429 				    dflt)) {
1430 			if (s_p_get_uint64(&p->def_mem_per_cpu,
1431 					   "DefMemPerCPU", tbl) ||
1432 			    s_p_get_uint64(&p->def_mem_per_cpu,
1433 					   "DefMemPerCPU", dflt)) {
1434 				p->def_mem_per_cpu |= MEM_PER_CPU;
1435 			} else {
1436 				p->def_mem_per_cpu = 0;
1437 			}
1438 		}
1439 
1440 		if (!s_p_get_uint64(&p->max_mem_per_cpu, "MaxMemPerNode",
1441 				    tbl) &&
1442 		    !s_p_get_uint64(&p->max_mem_per_cpu, "MaxMemPerNode",
1443 				    dflt)) {
1444 			if (s_p_get_uint64(&p->max_mem_per_cpu,
1445 					   "MaxMemPerCPU", tbl) ||
1446 			    s_p_get_uint64(&p->max_mem_per_cpu,
1447 					   "MaxMemPerCPU", dflt)) {
1448 				p->max_mem_per_cpu |= MEM_PER_CPU;
1449 			} else {
1450 				p->max_mem_per_cpu = 0;
1451 			}
1452 		}
1453 
1454 		if (!s_p_get_boolean((bool *)&p->disable_root_jobs,
1455 				     "DisableRootJobs", tbl))
1456 			p->disable_root_jobs = NO_VAL16;
1457 
1458 		if (!s_p_get_boolean((bool *)&p->exclusive_user,
1459 				     "ExclusiveUser", tbl))
1460 			p->exclusive_user = 0;
1461 
1462 		if (!s_p_get_boolean(&p->hidden_flag, "Hidden", tbl) &&
1463 		    !s_p_get_boolean(&p->hidden_flag, "Hidden", dflt))
1464 			p->hidden_flag = false;
1465 
1466 		if (!s_p_get_string(&tmp, "MaxTime", tbl) &&
1467 		    !s_p_get_string(&tmp, "MaxTime", dflt))
1468 			p->max_time = INFINITE;
1469 		else {
1470 			int max_time = time_str2mins(tmp);
1471 			if ((max_time < 0) && (max_time != INFINITE)) {
1472 				error("Bad value \"%s\" for MaxTime", tmp);
1473 				_destroy_partitionname(p);
1474 				s_p_hashtbl_destroy(tbl);
1475 				xfree(tmp);
1476 				return -1;
1477 			}
1478 			p->max_time = max_time;
1479 			xfree(tmp);
1480 		}
1481 
1482 		if (!s_p_get_uint32(&p->grace_time, "GraceTime", tbl) &&
1483 		    !s_p_get_uint32(&p->grace_time, "GraceTime", dflt))
1484 			p->grace_time = 0;
1485 
1486 		if (!s_p_get_string(&tmp, "DefaultTime", tbl) &&
1487 		    !s_p_get_string(&tmp, "DefaultTime", dflt))
1488 			p->default_time = NO_VAL;
1489 		else {
1490 			int default_time = time_str2mins(tmp);
1491 			if ((default_time < 0) && (default_time != INFINITE)) {
1492 				error("Bad value \"%s\" for DefaultTime", tmp);
1493 				_destroy_partitionname(p);
1494 				s_p_hashtbl_destroy(tbl);
1495 				xfree(tmp);
1496 				return -1;
1497 			}
1498 			p->default_time = default_time;
1499 			xfree(tmp);
1500 		}
1501 
1502 		if (!s_p_get_uint32(&p->max_nodes, "MaxNodes", tbl)
1503 		    && !s_p_get_uint32(&p->max_nodes, "MaxNodes", dflt))
1504 			p->max_nodes = INFINITE;
1505 
1506 		if (!s_p_get_uint32(&p->min_nodes, "MinNodes", tbl)
1507 		    && !s_p_get_uint32(&p->min_nodes, "MinNodes", dflt))
1508 			p->min_nodes = 0;
1509 
1510 		if (!s_p_get_string(&p->nodes, "Nodes", tbl)
1511 		    && !s_p_get_string(&p->nodes, "Nodes", dflt))
1512 			p->nodes = NULL;
1513 		else {
1514 			int i;
1515 			for (i=0; p->nodes[i]; i++) {
1516 				if (isspace((int)p->nodes[i]))
1517 					p->nodes[i] = ',';
1518 			}
1519 		}
1520 
1521 		if (!s_p_get_boolean(&p->root_only_flag, "RootOnly", tbl)
1522 		    && !s_p_get_boolean(&p->root_only_flag, "RootOnly", dflt))
1523 			p->root_only_flag = false;
1524 
1525 		if (!s_p_get_boolean(&p->req_resv_flag, "ReqResv", tbl)
1526 		    && !s_p_get_boolean(&p->req_resv_flag, "ReqResv", dflt))
1527 			p->req_resv_flag = false;
1528 
1529 		if (!s_p_get_boolean(&p->lln_flag, "LLN", tbl) &&
1530 		    !s_p_get_boolean(&p->lln_flag, "LLN", dflt))
1531 			p->lln_flag = false;
1532 
1533 		if (s_p_get_string(&tmp, "OverTimeLimit", tbl) ||
1534 		    s_p_get_string(&tmp, "OverTimeLimit", dflt)) {
1535 			if (!strcasecmp(tmp, "INFINITE") ||
1536 			    !strcasecmp(tmp, "UNLIMITED")) {
1537 				p->over_time_limit = INFINITE16;
1538 			} else {
1539 				int i = strtol(tmp, (char **) NULL, 10);
1540 				if (i < 0)
1541 					error("Ignoring bad OverTimeLimit value: %s",
1542 					      tmp);
1543 				else if (i > 0xfffe)
1544 					p->over_time_limit = INFINITE16;
1545 				else
1546 					p->over_time_limit = i;
1547 			}
1548 			xfree(tmp);
1549 		} else
1550 			p->over_time_limit = NO_VAL16;
1551 
1552 		if (s_p_get_string(&tmp, "PreemptMode", tbl) ||
1553 		    s_p_get_string(&tmp, "PreemptMode", dflt)) {
1554 			p->preempt_mode = preempt_mode_num(tmp);
1555 			if (p->preempt_mode == NO_VAL16) {
1556 				error("Bad value \"%s\" for PreemptMode", tmp);
1557 				xfree(tmp);
1558 				return -1;
1559 			}
1560 			xfree(tmp);
1561 		} else
1562 			p->preempt_mode = NO_VAL16;
1563 
1564 		if (!s_p_get_uint16(&p->priority_job_factor,
1565 				    "PriorityJobFactor", tbl) &&
1566 		    !s_p_get_uint16(&p->priority_job_factor,
1567 				    "PriorityJobFactor", dflt)) {
1568 			p->priority_job_factor = 1;
1569 		}
1570 
1571 		if (!s_p_get_uint16(&p->priority_tier, "PriorityTier", tbl) &&
1572 		    !s_p_get_uint16(&p->priority_tier, "PriorityTier", dflt)) {
1573 			p->priority_tier = 1;
1574 		}
1575 		if (s_p_get_uint16(&tmp_16, "Priority", tbl) ||
1576 		    s_p_get_uint16(&tmp_16, "Priority", dflt)) {
1577 			p->priority_job_factor = tmp_16;
1578 			p->priority_tier = tmp_16;
1579 		}
1580 
1581 		if (!s_p_get_string(&p->qos_char, "QOS", tbl)
1582 		    && !s_p_get_string(&p->qos_char, "QOS", dflt))
1583 			p->qos_char = NULL;
1584 
1585 		if (s_p_get_string(&tmp, "SelectTypeParameters", tbl)) {
1586 			if (xstrncasecmp(tmp, "CR_Core_Memory", 14) == 0)
1587 				p->cr_type = CR_CORE | CR_MEMORY;
1588 			else if (xstrncasecmp(tmp, "CR_Core", 7) == 0)
1589 				p->cr_type = CR_CORE;
1590 			else if (xstrncasecmp(tmp, "CR_Socket_Memory", 16) == 0)
1591 				p->cr_type = CR_SOCKET | CR_MEMORY;
1592 			else if (xstrncasecmp(tmp, "CR_Socket", 9) == 0)
1593 				p->cr_type = CR_SOCKET;
1594 			else {
1595 				error("Bad value for SelectTypeParameters: %s",
1596 				      tmp);
1597 				_destroy_partitionname(p);
1598 				s_p_hashtbl_destroy(tbl);
1599 				xfree(tmp);
1600 				return -1;
1601 			}
1602 			xfree(tmp);
1603 		} else
1604 			p->cr_type = 0;
1605 
1606 		if (s_p_get_string(&tmp, "OverSubscribe", tbl) ||
1607 		    s_p_get_string(&tmp, "OverSubscribe", dflt) ||
1608 		    s_p_get_string(&tmp, "Shared", tbl) ||
1609 		    s_p_get_string(&tmp, "Shared", dflt)) {
1610 			if (xstrcasecmp(tmp, "NO") == 0)
1611 				p->max_share = 1;
1612 			else if (xstrcasecmp(tmp, "EXCLUSIVE") == 0)
1613 				p->max_share = 0;
1614 			else if (xstrncasecmp(tmp, "YES:", 4) == 0) {
1615 				int i = strtol(&tmp[4], (char **) NULL, 10);
1616 				if (i <= 1) {
1617 					error("Ignoring bad OverSubscribe value: %s",
1618 					      tmp);
1619 					p->max_share = 1; /* Shared=NO */
1620 				} else
1621 					p->max_share = i;
1622 			} else if (xstrcasecmp(tmp, "YES") == 0)
1623 				p->max_share = 4;
1624 			else if (xstrncasecmp(tmp, "FORCE:", 6) == 0) {
1625 				int i = strtol(&tmp[6], (char **) NULL, 10);
1626 				if (i < 1) {
1627 					error("Ignoring bad OverSubscribe value: %s",
1628 					      tmp);
1629 					p->max_share = 1; /* Shared=NO */
1630 				} else
1631 					p->max_share = i | SHARED_FORCE;
1632 			} else if (xstrcasecmp(tmp, "FORCE") == 0)
1633 				p->max_share = 4 | SHARED_FORCE;
1634 			else {
1635 				error("Bad value \"%s\" for Shared", tmp);
1636 				_destroy_partitionname(p);
1637 				s_p_hashtbl_destroy(tbl);
1638 				xfree(tmp);
1639 				return -1;
1640 			}
1641 			xfree(tmp);
1642 		} else
1643 			p->max_share = 1;
1644 
1645 		if (s_p_get_string(&tmp, "State", tbl) ||
1646 		    s_p_get_string(&tmp, "State", dflt)) {
1647 			if (xstrncasecmp(tmp, "DOWN", 4) == 0)
1648 				p->state_up = PARTITION_DOWN;
1649 			else if (xstrncasecmp(tmp, "UP", 2) == 0)
1650 				p->state_up = PARTITION_UP;
1651 			else if (xstrncasecmp(tmp, "DRAIN", 5) == 0)
1652 				p->state_up = PARTITION_DRAIN;
1653 			else if (xstrncasecmp(tmp, "INACTIVE", 8) == 0)
1654 				 p->state_up = PARTITION_INACTIVE;
1655 			else {
1656 				error("Bad value \"%s\" for State", tmp);
1657 				_destroy_partitionname(p);
1658 				s_p_hashtbl_destroy(tbl);
1659 				xfree(tmp);
1660 				return -1;
1661 			}
1662 			xfree(tmp);
1663 		} else
1664 			p->state_up = PARTITION_UP;
1665 
1666 		s_p_hashtbl_destroy(tbl);
1667 
1668 		*dest = (void *)p;
1669 
1670 		return 1;
1671 	}
1672 
1673 	/* should not get here */
1674 }
1675 
_destroy_partitionname(void * ptr)1676 static void _destroy_partitionname(void *ptr)
1677 {
1678 	slurm_conf_partition_t *p = (slurm_conf_partition_t *)ptr;
1679 
1680 	xfree(p->allow_alloc_nodes);
1681 	xfree(p->allow_accounts);
1682 	xfree(p->allow_groups);
1683 	xfree(p->allow_qos);
1684 	xfree(p->alternate);
1685 	xfree(p->billing_weights_str);
1686 	xfree(p->deny_accounts);
1687 	xfree(p->deny_qos);
1688 	FREE_NULL_LIST(p->job_defaults_list);
1689 	xfree(p->name);
1690 	xfree(p->nodes);
1691 	xfree(p->qos_char);
1692 	xfree(ptr);
1693 }
1694 
_load_slurmctld_host(slurm_ctl_conf_t * conf)1695 static int _load_slurmctld_host(slurm_ctl_conf_t *conf)
1696 {
1697 	int count = 0, i, j;
1698 	char *ignore;
1699 	slurm_conf_server_t **ptr = NULL;
1700 
1701 	if (s_p_get_array((void ***)&ptr, &count, "SlurmctldHost", conf_hashtbl)) {
1702 		/*
1703 		 * Using new-style SlurmctldHost entries.
1704 		 */
1705 		conf->control_machine = xcalloc(count, sizeof(char *));
1706 		conf->control_addr = xcalloc(count, sizeof(char *));
1707 		conf->control_cnt = count;
1708 
1709 		for (i = 0; i < count; i++) {
1710 			conf->control_machine[i] = xstrdup(ptr[i]->hostname);
1711 			conf->control_addr[i] = xstrdup(ptr[i]->addr);
1712 		}
1713 
1714 		/*
1715 		 * Throw errors if old-style entries are still in the config,
1716 		 * but continue on with the newer-style entries anyways.
1717 		 */
1718 		if (s_p_get_string(&ignore, "ControlMachine", conf_hashtbl)) {
1719 			error("Ignoring ControlMachine since SlurmctldHost is set.");
1720 			xfree(ignore);
1721 		}
1722 		if (s_p_get_string(&ignore, "ControlAddr", conf_hashtbl)) {
1723 			error("Ignoring ControlAddr since SlurmctldHost is set.");
1724 			xfree(ignore);
1725 		}
1726 		if (s_p_get_string(&ignore, "BackupController", conf_hashtbl)) {
1727 			error("Ignoring BackupController since SlurmctldHost is set.");
1728 			xfree(ignore);
1729 		}
1730 		if (s_p_get_string(&ignore, "BackupAddr", conf_hashtbl)) {
1731 			error("Ignoring BackupAddr since SlurmctldHost is set.");
1732 			xfree(ignore);
1733 		}
1734 	} else {
1735 		/*
1736 		 * Using old-style ControlMachine/BackupController entries.
1737 		 *
1738 		 * Allocate two entries, one for primary and one for backup.
1739 		 */
1740 		char *tmp = NULL;
1741 		conf->control_machine = xmalloc(sizeof(char *));
1742 		conf->control_addr = xmalloc(sizeof(char *));
1743 		conf->control_cnt = 1;
1744 
1745 		if (!s_p_get_string(&conf->control_machine[0],
1746 				    "ControlMachine", conf_hashtbl)) {
1747 			/*
1748 			 * Missing SlurmctldHost and ControlMachine, so just
1749 			 * warn about the newer config option.
1750 			 */
1751 			error("No SlurmctldHost defined.");
1752 			goto error;
1753 		}
1754 		if (!s_p_get_string(&conf->control_addr[0],
1755 				    "ControlAddr", conf_hashtbl) &&
1756 		    conf->control_machine[0] &&
1757 		    strchr(conf->control_machine[0], ',')) {
1758 			error("ControlMachine has multiple host names, so ControlAddr must be specified.");
1759 			goto error;
1760 		}
1761 
1762 		if (s_p_get_string(&tmp, "BackupController", conf_hashtbl)) {
1763 			xrealloc(conf->control_machine, (sizeof(char *) * 2));
1764 			xrealloc(conf->control_addr, (sizeof(char *) * 2));
1765 			conf->control_cnt = 2;
1766 			conf->control_machine[1] = tmp;
1767 			tmp = NULL;
1768 		}
1769 		if (s_p_get_string(&tmp, "BackupAddr", conf_hashtbl)) {
1770 			if (conf->control_cnt == 1) {
1771 				error("BackupAddr specified without BackupController");
1772 				xfree(tmp);
1773 				goto error;
1774 			}
1775 			conf->control_addr[1] = tmp;
1776 			tmp = NULL;
1777 		}
1778 	}
1779 
1780 	/*
1781 	 * Fix up the control_addr array if they were not explicitly set above,
1782 	 * replace "localhost" with the actual hostname, and verify there are
1783 	 * no duplicate entries.
1784 	 */
1785 	for (i = 0; i < conf->control_cnt; i++) {
1786 		if (!conf->control_addr[i]) {
1787 			conf->control_addr[i] =
1788 				xstrdup(conf->control_machine[i]);
1789 		}
1790 		if (!xstrcasecmp("localhost", conf->control_machine[i])) {
1791 			xfree(conf->control_machine[i]);
1792 			conf->control_machine[i] = xmalloc(MAX_SLURM_NAME);
1793 			if (gethostname_short(conf->control_machine[i],
1794 					      MAX_SLURM_NAME)) {
1795 				error("getnodename: %m");
1796 				goto error;
1797 			}
1798 		}
1799 		for (j = 0; j < i; j++) {
1800 			if (!xstrcmp(conf->control_machine[i],
1801 				     conf->control_machine[j])) {
1802 				error("Duplicate SlurmctldHost records: %s",
1803 				      conf->control_machine[i]);
1804 				goto error;
1805 			}
1806 		}
1807 	}
1808 	return SLURM_SUCCESS;
1809 
1810 error:
1811 	if (conf->control_machine && conf->control_addr) {
1812 		for (i = 0; i < conf->control_cnt; i++) {
1813 			xfree(conf->control_machine[i]);
1814 			xfree(conf->control_addr[i]);
1815 		}
1816 		xfree(conf->control_machine);
1817 		xfree(conf->control_addr);
1818 	}
1819 	conf->control_cnt = 0;
1820 	return SLURM_ERROR;
1821 }
1822 
_parse_slurmctld_host(void ** dest,slurm_parser_enum_t type,const char * key,const char * value,const char * line,char ** leftover)1823 static int _parse_slurmctld_host(void **dest, slurm_parser_enum_t type,
1824 				 const char *key, const char *value,
1825 				 const char *line, char **leftover)
1826 {
1827 	s_p_hashtbl_t *tbl;
1828 	slurm_conf_server_t *p;
1829 	char *open_paren, *close_paren;
1830 	static s_p_options_t _slurmctld_host_options[] = {
1831 		{NULL}
1832 	};
1833 
1834 	tbl = s_p_hashtbl_create(_slurmctld_host_options);
1835 	s_p_parse_line(tbl, *leftover, leftover);
1836 
1837 	open_paren = strchr(value, '(');
1838 	close_paren = strchr(value, ')');
1839 	if ((open_paren && !close_paren) ||
1840 	    (!open_paren && close_paren) ||
1841 	    (close_paren && (close_paren[1] != '\0')) ||
1842 	    (close_paren && (close_paren != strrchr(value, ')')))) {
1843 		error("Bad value \"%s\" for SlurmctldHost", value);
1844 		return -1;
1845 	}
1846 
1847 	p = xmalloc(sizeof(slurm_conf_server_t));
1848 	if (open_paren && close_paren) {
1849 		p->hostname = xstrdup(value);
1850 		open_paren = strchr(p->hostname, '(');
1851 		if (open_paren)
1852 			open_paren[0] = '\0';
1853 		p->addr = xstrdup(open_paren + 1);
1854 		close_paren = strchr(p->addr, ')');
1855 		if (close_paren)
1856 			close_paren[0] = '\0';
1857 	} else {
1858 		p->hostname = xstrdup(value);
1859 		p->addr = xstrdup(value);
1860 	}
1861 
1862 	s_p_hashtbl_destroy(tbl);
1863 	*dest = (void *) p;
1864 
1865 	return 1;
1866 }
1867 
1868 /* May not be needed */
_destroy_slurmctld_host(void * ptr)1869 static void _destroy_slurmctld_host(void *ptr)
1870 {
1871 	slurm_conf_server_t *p = (slurm_conf_server_t *) ptr;
1872 
1873 	xfree(p->hostname);
1874 	xfree(p->addr);
1875 	xfree(ptr);
1876 }
1877 
/*
 * Return (via ptr_array) the parsed PartitionName records and their count.
 * The array remains owned by conf_hashtbl; do not free it.
 */
int slurm_conf_partition_array(slurm_conf_partition_t **ptr_array[])
{
	slurm_conf_partition_t **parts;
	int cnt = 0;

	if (!s_p_get_array((void ***)&parts, &cnt, "PartitionName",
			   conf_hashtbl)) {
		*ptr_array = NULL;
		return 0;
	}

	*ptr_array = parts;
	return cnt;
}
1892 
/*
 * Parse one DownNodes configuration record into a freshly allocated
 * slurm_conf_downnodes_t. Ownership of *dest transfers to the caller
 * (freed by _destroy_downnodes()).
 * RET 1 (record accepted).
 */
static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
			   const char *key, const char *value,
			   const char *line, char **leftover)
{
	s_p_hashtbl_t *tbl;
	slurm_conf_downnodes_t *n;
	static s_p_options_t _downnodes_options[] = {
		{"Reason", S_P_STRING},
		{"State", S_P_STRING},
		{NULL}
	};

	tbl = s_p_hashtbl_create(_downnodes_options);
	s_p_parse_line(tbl, *leftover, leftover);
	/* s_p_dump_values(tbl, _downnodes_options); */

	/*
	 * Fix: was xmalloc(sizeof(slurm_conf_node_t)) — the wrong struct
	 * type. Allocate exactly what "n" points to.
	 */
	n = xmalloc(sizeof(*n));
	n->nodenames = xstrdup(value);

	/* Default reason when the config omits one. */
	if (!s_p_get_string(&n->reason, "Reason", tbl))
		n->reason = xstrdup("Set in slurm.conf");

	if (!s_p_get_string(&n->state, "State", tbl))
		n->state = NULL;

	s_p_hashtbl_destroy(tbl);

	*dest = (void *)n;

	return 1;
}
1924 
_destroy_downnodes(void * ptr)1925 static void _destroy_downnodes(void *ptr)
1926 {
1927 	slurm_conf_downnodes_t *n = (slurm_conf_downnodes_t *)ptr;
1928 	xfree(n->nodenames);
1929 	xfree(n->reason);
1930 	xfree(n->state);
1931 	xfree(ptr);
1932 }
1933 
/*
 * Return (via ptr_array) the parsed DownNodes records and their count.
 * The array remains owned by conf_hashtbl; do not free it.
 */
extern int slurm_conf_downnodes_array(slurm_conf_downnodes_t **ptr_array[])
{
	slurm_conf_downnodes_t **records;
	int cnt = 0;

	if (!s_p_get_array((void ***)&records, &cnt, "DownNodes",
			   conf_hashtbl)) {
		*ptr_array = NULL;
		return 0;
	}

	*ptr_array = records;
	return cnt;
}
1947 
/*
 * Parse one NodeSet configuration record into a freshly allocated
 * slurm_conf_nodeset_t. Ownership of *dest transfers to the caller
 * (freed by _destroy_nodeset()).
 * RET 1 (record accepted).
 */
static int _parse_nodeset(void **dest, slurm_parser_enum_t type,
			  const char *key, const char *value,
			  const char *line, char **leftover)
{
	static s_p_options_t _nodeset_options[] = {
		{"Feature", S_P_STRING},
		{"Nodes", S_P_STRING},
		{NULL}
	};
	s_p_hashtbl_t *tbl;
	slurm_conf_nodeset_t *nodeset;

	tbl = s_p_hashtbl_create(_nodeset_options);
	s_p_parse_line(tbl, *leftover, leftover);
	/* s_p_dump_values(tbl, _nodeset_options); */

	nodeset = xmalloc(sizeof(*nodeset));
	nodeset->name = xstrdup(value);

	/* Both options are optional; fields stay NULL when absent. */
	s_p_get_string(&nodeset->feature, "Feature", tbl);
	s_p_get_string(&nodeset->nodes, "Nodes", tbl);

	s_p_hashtbl_destroy(tbl);

	*dest = (void *) nodeset;

	return 1;
}
1976 
_destroy_nodeset(void * ptr)1977 static void _destroy_nodeset(void *ptr)
1978 {
1979 	slurm_conf_nodeset_t *n = (slurm_conf_nodeset_t *)ptr;
1980 	xfree(n->feature);
1981 	xfree(n->name);
1982 	xfree(n->nodes);
1983 	xfree(ptr);
1984 }
1985 
/*
 * Return (via ptr_array) the parsed NodeSet records and their count.
 * The array remains owned by conf_hashtbl; do not free it.
 */
extern int slurm_conf_nodeset_array(slurm_conf_nodeset_t **ptr_array[])
{
	slurm_conf_nodeset_t **records;
	int cnt = 0;

	if (!s_p_get_array((void ***)&records, &cnt, "NodeSet",
			   conf_hashtbl)) {
		*ptr_array = NULL;
		return 0;
	}

	*ptr_array = records;
	return cnt;
}
1999 
_free_name_hashtbl(void)2000 static void _free_name_hashtbl(void)
2001 {
2002 	int i;
2003 	names_ll_t *p, *q;
2004 
2005 	for (i=0; i<NAME_HASH_LEN; i++) {
2006 		p = node_to_host_hashtbl[i];
2007 		while (p) {
2008 			xfree(p->address);
2009 			xfree(p->alias);
2010 			xfree(p->cpu_spec_list);
2011 			xfree(p->hostname);
2012 			q = p->next_alias;
2013 			xfree(p);
2014 			p = q;
2015 		}
2016 		node_to_host_hashtbl[i] = NULL;
2017 		host_to_node_hashtbl[i] = NULL;
2018 	}
2019 	nodehash_initialized = false;
2020 }
2021 
/*
 * Intentionally empty: the hash tables are file-scope arrays that are
 * zero-initialized by the loader, so no runtime setup is required.
 */
static void _init_name_hashtbl(void)
{
}
2026 
_get_hash_idx(const char * name)2027 static int _get_hash_idx(const char *name)
2028 {
2029 	int index = 0;
2030 	int j;
2031 
2032 	if (name == NULL)
2033 		return 0;	/* degenerate case */
2034 
2035 	/* Multiply each character by its numerical position in the
2036 	 * name string to add a bit of entropy, because host names such
2037 	 * as cluster[0001-1000] can cause excessive index collisions.
2038 	 */
2039 	for (j = 1; *name; name++, j++)
2040 		index += (int)*name * j;
2041 	index %= NAME_HASH_LEN;
2042 	while (index < 0) /* Coverity thinks "index" could be negative with "if" */
2043 		index += NAME_HASH_LEN;
2044 
2045 	return index;
2046 }
2047 
/*
 * Insert one node record into both lookup tables:
 *   node_to_host_hashtbl (keyed on NodeName/alias) and
 *   host_to_node_hashtbl (keyed on NodeHostName).
 * All string arguments are copied; the caller keeps ownership of its
 * buffers. Duplicate aliases (and, without front-end/multiple-slurmd
 * support, duplicate hostnames) are rejected with an error and the
 * record is dropped.
 *
 * NOTE(review): "lvl" and "local_test_config_rc" are file-scope symbols
 * defined outside this view — presumably the log level and config-test
 * result used throughout this file; confirm against the full file.
 */
static void _push_to_hashtbls(char *alias, char *hostname, char *address,
			      char *bcast_address, uint16_t port,
			      uint16_t cpus, uint16_t boards,
			      uint16_t sockets, uint16_t cores,
			      uint16_t threads, bool front_end,
			      char *cpu_spec_list, uint16_t core_spec_cnt,
			      uint64_t mem_spec_limit, slurm_addr_t *addr,
			      bool initialized)
{
	int hostname_idx, alias_idx;
	names_ll_t *p, *new;

	alias_idx = _get_hash_idx(alias);
	hostname_idx = _get_hash_idx(hostname);

#if !defined(HAVE_FRONT_END) && !defined(MULTIPLE_SLURMD)
	/* Ensure only one slurmd configured on each host */
	p = host_to_node_hashtbl[hostname_idx];
	while (p) {
		if (xstrcmp(p->hostname, hostname) == 0) {
			error("Duplicated NodeHostName %s in the config file",
			      hostname);
			return;
		}
		p = p->next_hostname;
	}
#endif
	/* Ensure only one instance of each NodeName */
	p = node_to_host_hashtbl[alias_idx];
	while (p) {
		if (xstrcmp(p->alias, alias) == 0) {
			if (front_end)
				log_var(lvl, "Frontend not configured correctly in slurm.conf. See FrontEndName in slurm.conf man page.");
			else
				log_var(lvl, "Duplicated NodeName %s in the config file",
					p->alias);
			local_test_config_rc = 1;

			return;
		}
		p = p->next_alias;
	}

	/* Create the new data structure and link it into the hash tables */
	new = xmalloc(sizeof(*new));
	new->alias	= xstrdup(alias);
	new->hostname	= xstrdup(hostname);
	new->address	= xstrdup(address);
	new->bcast_address = xstrdup(bcast_address);
	new->port	= port;
	new->cpus	= cpus;
	new->boards	= boards;
	new->sockets	= sockets;
	new->cores	= cores;
	new->threads	= threads;
	new->addr_initialized = initialized;
	new->cpu_spec_list = xstrdup(cpu_spec_list);
	new->core_spec_cnt = core_spec_cnt;
	new->mem_spec_limit = mem_spec_limit;

	if (addr)
		memcpy(&new->addr, addr, sizeof(slurm_addr_t));

	/* Put on end of each list */
	new->next_alias	= NULL;
	if (node_to_host_hashtbl[alias_idx]) {
		p = node_to_host_hashtbl[alias_idx];
		while (p->next_alias)
			p = p->next_alias;
		p->next_alias = new;
	} else {
		node_to_host_hashtbl[alias_idx] = new;
	}

	new->next_hostname = NULL;
	if (host_to_node_hashtbl[hostname_idx]) {
		p = host_to_node_hashtbl[hostname_idx];
		while (p->next_hostname)
			p = p->next_hostname;
		p->next_hostname = new;
	} else {
		host_to_node_hashtbl[hostname_idx] = new;
	}
}
2132 
/*
 * Register every FrontendName/FrontendAddr pair from one front-end
 * configuration record in the name hash tables.
 * RET SLURM_SUCCESS, -1 for an empty record, or an error code on
 * failure to expand or match the host lists.
 */
static int _register_front_ends(slurm_conf_frontend_t *front_end_ptr)
{
	hostlist_t hostname_list = NULL;
	hostlist_t address_list = NULL;
	char *hostname = NULL;
	char *address = NULL;
	int error_code = SLURM_SUCCESS;

	if ((front_end_ptr->frontends == NULL) ||
	    (front_end_ptr->frontends[0] == '\0'))
		return -1;

	if ((hostname_list = hostlist_create(front_end_ptr->frontends))
	     == NULL) {
		error("Unable to create FrontendNames list from %s",
		      front_end_ptr->frontends);
		error_code = errno;
		goto cleanup;
	}
	if ((address_list = hostlist_create(front_end_ptr->addresses))
	     == NULL) {
		error("Unable to create FrontendAddr list from %s",
		      front_end_ptr->addresses);
		error_code = errno;
		goto cleanup;
	}
	if (hostlist_count(address_list) != hostlist_count(hostname_list)) {
		error("Node count mismatch between FrontendNames and "
		      "FrontendAddr");
		/* Fix: previously fell through with SLURM_SUCCESS. */
		error_code = SLURM_ERROR;
		goto cleanup;
	}

	while ((hostname = hostlist_shift(hostname_list))) {
		address = hostlist_shift(address_list);
		/* front_end = true; single CPU/board/socket/core/thread */
		_push_to_hashtbls(hostname, hostname, address, NULL,
				  front_end_ptr->port, 1, 1, 1, 1, 1, 1,
				  NULL, 0, 0, NULL, false);
		/* hostlist_shift() returns malloc()'d strings */
		free(hostname);
		free(address);
	}

	/* free allocated storage */
cleanup:
	if (hostname_list)
		hostlist_destroy(hostname_list);
	if (address_list)
		hostlist_destroy(address_list);
	return error_code;
}
2182 
/*
 * check_nodeline_info() callback: record one expanded node entry in the
 * name hash tables (front_end = false for regular compute nodes).
 */
static void _check_callback(char *alias, char *hostname, char *address,
			    char *bcast_address, uint16_t port, int state_val,
			    slurm_conf_node_t *node_ptr,
			    config_record_t *config_ptr)
{
	_push_to_hashtbls(alias, hostname, address, bcast_address, port,
			  node_ptr->cpus, node_ptr->boards, node_ptr->sockets,
			  node_ptr->cores, node_ptr->threads, false,
			  node_ptr->cpu_spec_list, node_ptr->core_spec_cnt,
			  node_ptr->mem_spec_limit, NULL, false);
}
2195 
/*
 * Build the node/host name hash tables from the parsed NodeName and
 * FrontendName configuration, loading slurm.conf first if that has not
 * already been done. No-op on subsequent calls until
 * _free_name_hashtbl() clears nodehash_initialized.
 * Caller must hold the slurm conf lock.
 */
static void _init_slurmd_nodehash(void)
{
	slurm_conf_node_t **ptr_array;
	slurm_conf_frontend_t **ptr_front_end;
	int count, i;

	if (nodehash_initialized)
		return;
	else
		nodehash_initialized = true;

	if (!conf_initialized) {
		if (_init_slurm_conf(NULL) != SLURM_SUCCESS) {
			log_var(lvl, "Unable to process slurm.conf file");
			local_test_config_rc = 1;
		}
	}

	/*
	 * Register each NodeName record; on multi-dimension clusters also
	 * derive the node prefix from the first valid record.
	 */
	count = slurm_conf_nodename_array(&ptr_array);
	for (i = 0; i < count; i++) {
		if ((check_nodeline_info(ptr_array[i],
					 NULL, lvl,
					 _check_callback) == SLURM_SUCCESS) &&
		    (slurmdb_setup_cluster_name_dims() > 1) &&
		    !conf_ptr->node_prefix)
			_set_node_prefix(ptr_array[i]->nodenames);
	}

	/* Register front-end nodes, if any are configured. */
	count = slurm_conf_frontend_array(&ptr_front_end);
	for (i = 0; i < count; i++)
		_register_front_ends(ptr_front_end[i]);
}
2228 
2229 /*
2230  * Caller needs to call slurm_conf_lock() and hold the lock before
2231  * calling this function (and call slurm_conf_unlock() afterwards).
2232  */
_internal_get_hostname(const char * node_name)2233 static char *_internal_get_hostname(const char *node_name)
2234 {
2235 	int idx;
2236 	names_ll_t *p;
2237 
2238 	_init_slurmd_nodehash();
2239 
2240 	idx = _get_hash_idx(node_name);
2241 	p = node_to_host_hashtbl[idx];
2242 	while (p) {
2243 		if (xstrcmp(p->alias, node_name) == 0) {
2244 			return xstrdup(p->hostname);
2245 		}
2246 		p = p->next_alias;
2247 	}
2248 	return NULL;
2249 }
2250 
2251 /*
2252  * slurm_conf_get_hostname - Return the NodeHostname for given NodeName
2253  */
extern char *slurm_conf_get_hostname(const char *node_name)
{
	char *hostname;

	/* Take the conf lock around the unlocked internal helper. */
	slurm_conf_lock();
	hostname = _internal_get_hostname(node_name);
	slurm_conf_unlock();

	return hostname;	/* caller must xfree(); NULL if unknown */
}
2264 
2265 /*
2266  * slurm_conf_get_nodename - Return the NodeName for given NodeHostname
2267  *
2268  * NOTE: Call xfree() to release returned value's memory.
2269  * NOTE: Caller must NOT be holding slurm_conf_lock().
2270  */
extern char *slurm_conf_get_nodename(const char *node_hostname)
{
	char *alias = NULL;
	int idx;
	names_ll_t *p;
#ifdef HAVE_FRONT_END
	/* On front-end systems, check the front-end list first. */
	slurm_conf_frontend_t *front_end_ptr = NULL;

 	slurm_conf_lock();
	if (!front_end_list) {
		debug("front_end_list is NULL");
	} else {
		front_end_ptr = list_find_first(front_end_list,
						_list_find_frontend,
						(char *) node_hostname);
		if (front_end_ptr) {
			alias = xstrdup(front_end_ptr->frontends);
			slurm_conf_unlock();
			return alias;
		}
	}
#else
	slurm_conf_lock();
#endif

	/* Fall back to the hostname-keyed hash table. */
	_init_slurmd_nodehash();
	idx = _get_hash_idx(node_hostname);
	p = host_to_node_hashtbl[idx];
	while (p) {
		if (xstrcmp(p->hostname, node_hostname) == 0) {
			/* First match wins; caller must xfree(). */
			alias = xstrdup(p->alias);
			break;
		}
		p = p->next_hostname;
	}
	slurm_conf_unlock();

	return alias;
}
2310 
2311 /*
 * slurm_conf_get_aliases - Return the NodeName values of all nodes
 * associated with a given NodeHostname (useful in the multiple-slurmd
 * case to get the list of virtual nodes associated with one real node)
2315  *
2316  * NOTE: Call xfree() to release returned value's memory.
2317  * NOTE: Caller must NOT be holding slurm_conf_lock().
2318  */
slurm_conf_get_aliases(const char * node_hostname)2319 extern char *slurm_conf_get_aliases(const char *node_hostname)
2320 {
2321 	int idx;
2322 	names_ll_t *p;
2323 	char *aliases = NULL;
2324 	char *s = NULL;
2325 
2326 	slurm_conf_lock();
2327 	_init_slurmd_nodehash();
2328 	idx = _get_hash_idx(node_hostname);
2329 
2330 	p = host_to_node_hashtbl[idx];
2331 	while (p) {
2332 		if (xstrcmp(p->hostname, node_hostname) == 0) {
2333 			if ( aliases == NULL )
2334 				aliases = xstrdup(p->alias);
2335 			else {
2336 				s = xstrdup_printf("%s %s",aliases,p->alias);
2337 				xfree(aliases);
2338 				aliases = s;
2339 			}
2340 		}
2341 		p = p->next_hostname;
2342 	}
2343 	slurm_conf_unlock();
2344 
2345 	return aliases;
2346 }
2347 
2348 /*
2349  * slurm_conf_get_nodeaddr - Return the NodeAddr for given NodeHostname
2350  *
2351  * NOTE: Call xfree() to release returned value's memory.
2352  * NOTE: Caller must NOT be holding slurm_conf_lock().
2353  */
slurm_conf_get_nodeaddr(const char * node_hostname)2354 extern char *slurm_conf_get_nodeaddr(const char *node_hostname)
2355 {
2356 	int idx;
2357 	names_ll_t *p;
2358 
2359 	slurm_conf_lock();
2360 	_init_slurmd_nodehash();
2361 	idx = _get_hash_idx(node_hostname);
2362 
2363 	p = host_to_node_hashtbl[idx];
2364 	while (p) {
2365 		if (xstrcmp(p->hostname, node_hostname) == 0) {
2366 			char *nodeaddr;
2367 			if (p->address != NULL)
2368 				nodeaddr = xstrdup(p->address);
2369 			else
2370 				nodeaddr = NULL;
2371 			slurm_conf_unlock();
2372 			return nodeaddr;
2373 		}
2374 		p = p->next_hostname;
2375 	}
2376 	slurm_conf_unlock();
2377 
2378 	return NULL;
2379 }
2380 
2381 /*
2382  * slurm_conf_get_nodename_from_addr - Return the NodeName for given NodeAddr
2383  *
2384  * NOTE: Call xfree() to release returned value's memory.
2385  * NOTE: Caller must NOT be holding slurm_conf_lock().
2386  */
extern char *slurm_conf_get_nodename_from_addr(const char *node_addr)
{
	char hostname[NI_MAXHOST];
	/*
	 * NOTE(review): inet_addr() yields an IPv4 in_addr_t stored in an
	 * unsigned long whose address is then cast to struct sockaddr *.
	 * That is not a complete sockaddr (no family field) — presumably
	 * the project's get_name_info() tolerates this; confirm. Also,
	 * inet_addr() cannot distinguish an error (INADDR_NONE) from the
	 * valid address 255.255.255.255.
	 */
	unsigned long addr = inet_addr(node_addr);
	char *start_name, *ret_name = NULL, *dot_ptr;

	if (get_name_info((struct sockaddr *)&addr,
			  sizeof(addr), hostname) != 0) {
		error("%s: No node found with addr %s", __func__, node_addr);
		return NULL;
	}

	if (!xstrcmp(hostname, "localhost")) {
		/* Address resolved to localhost: use this host's short name. */
		start_name = xshort_hostname();
	} else {
		/* Strip the domain part to get the short hostname. */
		start_name = xstrdup(hostname);
		dot_ptr = strchr(start_name, '.');
		if (dot_ptr)
			dot_ptr[0] = '\0';
	}

	/* Map the short hostname to its NodeName alias(es). */
	ret_name = slurm_conf_get_aliases(start_name);
	xfree(start_name);

	return ret_name;	/* caller must xfree(); NULL if unknown */
}
2413 
2414 /*
2415  * slurm_conf_get_aliased_nodename - Return the NodeName for the
2416  * complete hostname string returned by gethostname if there is
2417  * such a match, otherwise iterate through any aliases returned
2418  * by get_host_by_name
2419  */
extern char *slurm_conf_get_aliased_nodename(void)
{
	char hostname_full[1024];
	int error_code;
	char *nodename;

	error_code = gethostname(hostname_full, sizeof(hostname_full));
	/* we shouldn't have any problem here since by the time
	 * this function has been called, gethostname_short,
	 * which invokes gethostname, has probably already been called
	 * successfully, so just return NULL if something weird
	 * happens at this point
	 */
	if (error_code)
		return NULL;

	nodename = slurm_conf_get_nodename(hostname_full);
	/* if the full hostname did not match a nodename */
	if (nodename == NULL) {
		/* use get_host_by_name; buffer sizes, semantics, etc.
		 * copied from slurm_protocol_socket_implementation.c
		 */
		struct hostent *he = NULL;
		/*
		 * Fix: was "char *h_buf[4096]" — an array of 4096 pointers
		 * where a 4 KiB character scratch buffer was intended.
		 */
		char h_buf[4096];
		int h_err;

		he = get_host_by_name(hostname_full, (void *) &h_buf,
				      sizeof(h_buf), &h_err);
		if (he != NULL) {
			unsigned int i = 0;
			/* check the "official" host name first */
			nodename = slurm_conf_get_nodename(he->h_name);
			while ((nodename == NULL) &&
			       (he->h_aliases[i] != NULL)) {
				/* the "official" name still didn't match --
				 * iterate through the aliases */
				nodename =
				     slurm_conf_get_nodename(he->h_aliases[i]);
				i++;
			}
		}
	}

	return nodename;	/* caller must xfree(); NULL if no match */
}
2465 
2466 /*
2467  * Return BcastAddr (if set) for a given NodeName, or NULL.
2468  */
slurm_conf_get_bcast_address(const char * node_name)2469 extern char *slurm_conf_get_bcast_address(const char *node_name)
2470 {
2471 	int idx;
2472 	names_ll_t *p;
2473 	char *bcast_address;
2474 
2475 	slurm_conf_lock();
2476 	_init_slurmd_nodehash();
2477 
2478 	idx = _get_hash_idx(node_name);
2479 	p = node_to_host_hashtbl[idx];
2480 	while (p && xstrcmp(p->alias, node_name))
2481 		p = p->next_alias;
2482 
2483 	if (!p) {
2484 		slurm_conf_unlock();
2485 		return NULL;
2486 	}
2487 
2488 	bcast_address = xstrdup(p->bcast_address);
2489 	slurm_conf_unlock();
2490 	return bcast_address;
2491 }
2492 
2493 /*
2494  * slurm_conf_get_port - Return the port for a given NodeName
2495  */
slurm_conf_get_port(const char * node_name)2496 extern uint16_t slurm_conf_get_port(const char *node_name)
2497 {
2498 	int idx;
2499 	names_ll_t *p;
2500 
2501 	slurm_conf_lock();
2502 	_init_slurmd_nodehash();
2503 
2504 	idx = _get_hash_idx(node_name);
2505 	p = node_to_host_hashtbl[idx];
2506 	while (p) {
2507 		if (xstrcmp(p->alias, node_name) == 0) {
2508 			uint16_t port;
2509 			if (!p->port)
2510 				p->port = (uint16_t) conf_ptr->slurmd_port;
2511 			port = p->port;
2512 			slurm_conf_unlock();
2513 			return port;
2514 		}
2515 		p = p->next_alias;
2516 	}
2517 	slurm_conf_unlock();
2518 
2519 	return 0;
2520 }
2521 
2522 /*
2523  * slurm_reset_alias - Reset the address and hostname of a specific node name
2524  */
slurm_reset_alias(char * node_name,char * node_addr,char * node_hostname)2525 extern void slurm_reset_alias(char *node_name, char *node_addr,
2526 			      char *node_hostname)
2527 {
2528 	int idx;
2529 	names_ll_t *p;
2530 
2531 	slurm_conf_lock();
2532 	_init_slurmd_nodehash();
2533 
2534 	idx = _get_hash_idx(node_name);
2535 	p = node_to_host_hashtbl[idx];
2536 	while (p) {
2537 		if (xstrcmp(p->alias, node_name) == 0) {
2538 			if (node_addr) {
2539 				xfree(p->address);
2540 				p->address = xstrdup(node_addr);
2541 				p->addr_initialized = false;
2542 			}
2543 			if (node_hostname) {
2544 				xfree(p->hostname);
2545 				p->hostname = xstrdup(node_hostname);
2546 			}
2547 			break;
2548 		}
2549 		p = p->next_alias;
2550 	}
2551 	slurm_conf_unlock();
2552 
2553 	return;
2554 }
2555 
2556 /*
2557  * slurm_conf_get_addr - Return the slurm_addr_t for a given NodeName
2558  * Returns SLURM_SUCCESS on success, SLURM_ERROR on failure.
2559  */
extern int slurm_conf_get_addr(const char *node_name, slurm_addr_t *address,
			       uint16_t flags)
{
	int idx;
	names_ll_t *p;

	slurm_conf_lock();
	_init_slurmd_nodehash();

	idx = _get_hash_idx(node_name);
	p = node_to_host_hashtbl[idx];
	while (p && xstrcmp(p->alias, node_name))
		p = p->next_alias;

	if (!p) {
		slurm_conf_unlock();
		return SLURM_ERROR;
	}

	/* Lazily default to the cluster-wide SlurmdPort. */
	if (!p->port)
		p->port = (uint16_t) conf_ptr->slurmd_port;

	/*
	 * Only use BcastAddr if USE_BCAST_NETWORK flag set and BcastAddr
	 * exists. Otherwise fall through to using NodeAddr value below.
	 */
	if (p->bcast_address && (flags & USE_BCAST_NETWORK)) {
		if (!p->bcast_addr_initialized) {
			slurm_set_addr(&p->bcast_addr, p->port,
				       p->bcast_address);
			if (p->bcast_addr.sin_family == 0 &&
			    p->bcast_addr.sin_port == 0) {
				slurm_conf_unlock();
				return SLURM_ERROR;
			}
			/*
			 * Consistency fix: mark the cached address valid
			 * only right after (re)resolving it, matching the
			 * NodeAddr path below. Previously the flag was
			 * (redundantly) set on every lookup.
			 */
			if (!no_addr_cache)
				p->bcast_addr_initialized = true;
		}
		*address = p->bcast_addr;
		slurm_conf_unlock();
		return SLURM_SUCCESS;
	}

	if (!p->addr_initialized) {
		slurm_set_addr(&p->addr, p->port, p->address);
		if (p->addr.sin_family == 0 && p->addr.sin_port == 0) {
			slurm_conf_unlock();
			return SLURM_ERROR;
		}
		/* Honor SlurmctldParameters=no_addr_cache if set. */
		if (!no_addr_cache)
			p->addr_initialized = true;
	}

	*address = p->addr;
	slurm_conf_unlock();
	return SLURM_SUCCESS;
}
2617 
2618 /*
2619  * slurm_conf_get_cpus_bsct -
2620  * Return the cpus, boards, sockets, cores, and threads configured for a
2621  * given NodeName
2622  * Returns SLURM_SUCCESS on success, SLURM_ERROR on failure.
2623  */
slurm_conf_get_cpus_bsct(const char * node_name,uint16_t * cpus,uint16_t * boards,uint16_t * sockets,uint16_t * cores,uint16_t * threads)2624 extern int slurm_conf_get_cpus_bsct(const char *node_name,
2625 				    uint16_t *cpus, uint16_t *boards,
2626 				    uint16_t *sockets, uint16_t *cores,
2627 				    uint16_t *threads)
2628 {
2629 	int idx;
2630 	names_ll_t *p;
2631 
2632 	slurm_conf_lock();
2633 	_init_slurmd_nodehash();
2634 
2635 	idx = _get_hash_idx(node_name);
2636 	p = node_to_host_hashtbl[idx];
2637 	while (p) {
2638 		if (xstrcmp(p->alias, node_name) == 0) {
2639 		    	if (cpus)
2640 				*cpus    = p->cpus;
2641 			if (boards)
2642 				*boards  = p->boards;
2643 			if (sockets)
2644 				*sockets = p->sockets;
2645 			if (cores)
2646 				*cores   = p->cores;
2647 			if (threads)
2648 				*threads = p->threads;
2649 			slurm_conf_unlock();
2650 			return SLURM_SUCCESS;
2651 		}
2652 		p = p->next_alias;
2653 	}
2654 	slurm_conf_unlock();
2655 
2656 	return SLURM_ERROR;
2657 }
2658 
2659 /*
2660  * slurm_conf_get_res_spec_info - Return resource specialization info
2661  * for a given NodeName
2662  * Returns SLURM_SUCCESS on success, SLURM_ERROR on failure.
2663  */
slurm_conf_get_res_spec_info(const char * node_name,char ** cpu_spec_list,uint16_t * core_spec_cnt,uint64_t * mem_spec_limit)2664 extern int slurm_conf_get_res_spec_info(const char *node_name,
2665 					char **cpu_spec_list,
2666 					uint16_t *core_spec_cnt,
2667 					uint64_t *mem_spec_limit)
2668 {
2669 	int idx;
2670 	names_ll_t *p;
2671 
2672 	slurm_conf_lock();
2673 	_init_slurmd_nodehash();
2674 
2675 	idx = _get_hash_idx(node_name);
2676 	p = node_to_host_hashtbl[idx];
2677 	while (p) {
2678 		if (xstrcmp(p->alias, node_name) == 0) {
2679 			if (core_spec_cnt)
2680 				*cpu_spec_list = xstrdup(p->cpu_spec_list);
2681 			if (core_spec_cnt)
2682 				*core_spec_cnt  = p->core_spec_cnt;
2683 			if (mem_spec_limit)
2684 				*mem_spec_limit = p->mem_spec_limit;
2685 			slurm_conf_unlock();
2686 			return SLURM_SUCCESS;
2687 		}
2688 		p = p->next_alias;
2689 	}
2690 	slurm_conf_unlock();
2691 
2692 	return SLURM_ERROR;
2693 }
2694 
2695 /*
2696  * gethostname_short - equivalent to gethostname, but return only the first
2697  * component of the fully qualified name
2698  * (e.g. "linux123.foo.bar" becomes "linux123")
2699  * OUT name
2700  */
int gethostname_short(char *name, size_t len)
{
	char *dot_ptr, path_name[1024];
	int rc;

	if (gethostname(path_name, sizeof(path_name)))
		return errno;

	/* Truncate at the first '.' to keep only the host component. */
	if ((dot_ptr = strchr(path_name, '.')))
		*dot_ptr = '\0';

	/*
	 * snprintf() always NUL-terminates (for len > 0) and returns the
	 * length it needed, which detects truncation just like the
	 * non-standard strlcpy() call it replaces.
	 */
	rc = snprintf(name, len, "%s", path_name);
	if ((rc < 0) || ((size_t) rc >= len))
		return ENAMETOOLONG;

	return 0;
}
2716 
2717 /*
2718  * free_slurm_conf - free all storage associated with a slurm_ctl_conf_t.
2719  * IN/OUT ctl_conf_ptr - pointer to data structure to be freed
2720  * IN purge_node_hash - purge system-wide node hash table if set,
2721  *			set to zero if clearing private copy of config data
2722  */
2723 extern void
free_slurm_conf(slurm_ctl_conf_t * ctl_conf_ptr,bool purge_node_hash)2724 free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
2725 {
2726 	int i;
2727 
2728 	xfree (ctl_conf_ptr->accounting_storage_backup_host);
2729 	xfree (ctl_conf_ptr->accounting_storage_ext_host);
2730 	xfree (ctl_conf_ptr->accounting_storage_host);
2731 	xfree (ctl_conf_ptr->accounting_storage_loc);
2732 	xfree (ctl_conf_ptr->accounting_storage_pass);
2733 	xfree (ctl_conf_ptr->accounting_storage_tres);
2734 	xfree (ctl_conf_ptr->accounting_storage_type);
2735 	xfree (ctl_conf_ptr->accounting_storage_user);
2736 	FREE_NULL_LIST(ctl_conf_ptr->acct_gather_conf);
2737 	xfree (ctl_conf_ptr->acct_gather_energy_type);
2738 	xfree (ctl_conf_ptr->acct_gather_profile_type);
2739 	xfree (ctl_conf_ptr->acct_gather_interconnect_type);
2740 	xfree (ctl_conf_ptr->acct_gather_filesystem_type);
2741 	xfree(ctl_conf_ptr->authalttypes);
2742 	xfree (ctl_conf_ptr->authinfo);
2743 	xfree (ctl_conf_ptr->authtype);
2744 	xfree (ctl_conf_ptr->bb_type);
2745 	FREE_NULL_LIST(ctl_conf_ptr->cgroup_conf);
2746 	xfree(ctl_conf_ptr->cli_filter_plugins);
2747 	xfree (ctl_conf_ptr->cluster_name);
2748 	for (i = 0; i < ctl_conf_ptr->control_cnt; i++) {
2749 		xfree(ctl_conf_ptr->control_addr[i]);
2750 		xfree(ctl_conf_ptr->control_machine[i]);
2751 	}
2752 	ctl_conf_ptr->control_cnt = 0;
2753 
2754 	xfree (ctl_conf_ptr->comm_params);
2755 	xfree (ctl_conf_ptr->control_addr);
2756 	xfree (ctl_conf_ptr->control_machine);
2757 	xfree (ctl_conf_ptr->core_spec_plugin);
2758 	xfree (ctl_conf_ptr->cred_type);
2759 	xfree (ctl_conf_ptr->dependency_params);
2760 	xfree (ctl_conf_ptr->epilog);
2761 	xfree (ctl_conf_ptr->epilog_slurmctld);
2762 	FREE_NULL_LIST(ctl_conf_ptr->ext_sensors_conf);
2763 	xfree (ctl_conf_ptr->ext_sensors_type);
2764 	xfree (ctl_conf_ptr->fed_params);
2765 	xfree (ctl_conf_ptr->gres_plugins);
2766 	xfree (ctl_conf_ptr->gpu_freq_def);
2767 	xfree (ctl_conf_ptr->health_check_program);
2768 	xfree (ctl_conf_ptr->job_acct_gather_freq);
2769 	xfree (ctl_conf_ptr->job_acct_gather_type);
2770 	xfree (ctl_conf_ptr->job_acct_gather_params);
2771 	xfree (ctl_conf_ptr->job_comp_host);
2772 	xfree (ctl_conf_ptr->job_comp_loc);
2773 	xfree(ctl_conf_ptr->job_comp_params);
2774 	xfree (ctl_conf_ptr->job_comp_pass);
2775 	xfree (ctl_conf_ptr->job_comp_type);
2776 	xfree (ctl_conf_ptr->job_comp_user);
2777 	xfree (ctl_conf_ptr->job_container_plugin);
2778 	xfree (ctl_conf_ptr->job_credential_private_key);
2779 	xfree (ctl_conf_ptr->job_credential_public_certificate);
2780 	FREE_NULL_LIST(ctl_conf_ptr->job_defaults_list);
2781 	xfree (ctl_conf_ptr->job_submit_plugins);
2782 	xfree (ctl_conf_ptr->launch_params);
2783 	xfree (ctl_conf_ptr->launch_type);
2784 	xfree (ctl_conf_ptr->layouts);
2785 	xfree (ctl_conf_ptr->licenses);
2786 	xfree (ctl_conf_ptr->mail_domain);
2787 	xfree (ctl_conf_ptr->mail_prog);
2788 	xfree (ctl_conf_ptr->mcs_plugin);
2789 	xfree (ctl_conf_ptr->mcs_plugin_params);
2790 	xfree (ctl_conf_ptr->mpi_default);
2791 	xfree (ctl_conf_ptr->mpi_params);
2792 	xfree (ctl_conf_ptr->msg_aggr_params);
2793 	FREE_NULL_LIST(ctl_conf_ptr->node_features_conf);
2794 	xfree (ctl_conf_ptr->node_features_plugins);
2795 	xfree (ctl_conf_ptr->node_prefix);
2796 	xfree (ctl_conf_ptr->plugindir);
2797 	xfree (ctl_conf_ptr->plugstack);
2798 	FREE_NULL_LIST(ctl_conf_ptr->slurmctld_plugstack_conf);
2799 	xfree (ctl_conf_ptr->power_parameters);
2800 	xfree (ctl_conf_ptr->power_plugin);
2801 	xfree (ctl_conf_ptr->preempt_type);
2802 	xfree(ctl_conf_ptr->prep_params);
2803 	xfree(ctl_conf_ptr->prep_plugins);
2804 	xfree (ctl_conf_ptr->priority_params);
2805 	xfree (ctl_conf_ptr->priority_type);
2806 	xfree (ctl_conf_ptr->priority_weight_tres);
2807 	xfree (ctl_conf_ptr->proctrack_type);
2808 	xfree (ctl_conf_ptr->prolog);
2809 	xfree (ctl_conf_ptr->prolog_slurmctld);
2810 	xfree (ctl_conf_ptr->propagate_rlimits);
2811 	xfree (ctl_conf_ptr->propagate_rlimits_except);
2812 	xfree (ctl_conf_ptr->reboot_program);
2813 	xfree (ctl_conf_ptr->requeue_exit);
2814 	xfree (ctl_conf_ptr->requeue_exit_hold);
2815 	xfree (ctl_conf_ptr->resume_fail_program);
2816 	xfree (ctl_conf_ptr->resume_program);
2817 	xfree (ctl_conf_ptr->resv_epilog);
2818 	xfree (ctl_conf_ptr->resv_prolog);
2819 	xfree (ctl_conf_ptr->route_plugin);
2820 	xfree (ctl_conf_ptr->salloc_default_command);
2821 	xfree (ctl_conf_ptr->sbcast_parameters);
2822 	xfree (ctl_conf_ptr->sched_logfile);
2823 	xfree (ctl_conf_ptr->sched_params);
2824 	xfree (ctl_conf_ptr->schedtype);
2825 	xfree (ctl_conf_ptr->select_type);
2826 	FREE_NULL_LIST(ctl_conf_ptr->select_conf_key_pairs);
2827 	xfree (ctl_conf_ptr->site_factor_params);
2828 	xfree (ctl_conf_ptr->site_factor_plugin);
2829 	xfree (ctl_conf_ptr->slurm_conf);
2830 	xfree (ctl_conf_ptr->slurm_user_name);
2831 	xfree (ctl_conf_ptr->slurmctld_addr);
2832 	xfree (ctl_conf_ptr->slurmctld_logfile);
2833 	xfree (ctl_conf_ptr->slurmctld_pidfile);
2834 	xfree (ctl_conf_ptr->slurmctld_plugstack);
2835 	xfree (ctl_conf_ptr->slurmctld_primary_off_prog);
2836 	xfree (ctl_conf_ptr->slurmctld_primary_on_prog);
2837 	xfree (ctl_conf_ptr->slurmd_logfile);
2838 	xfree (ctl_conf_ptr->slurmctld_params);
2839 	xfree (ctl_conf_ptr->slurmd_params);
2840 	xfree (ctl_conf_ptr->slurmd_pidfile);
2841 	xfree (ctl_conf_ptr->slurmd_spooldir);
2842 	xfree (ctl_conf_ptr->slurmd_user_name);
2843 	xfree (ctl_conf_ptr->srun_epilog);
2844 	xfree (ctl_conf_ptr->srun_port_range);
2845 	xfree (ctl_conf_ptr->srun_prolog);
2846 	xfree (ctl_conf_ptr->state_save_location);
2847 	xfree (ctl_conf_ptr->suspend_exc_nodes);
2848 	xfree (ctl_conf_ptr->suspend_exc_parts);
2849 	xfree (ctl_conf_ptr->suspend_program);
2850 	xfree (ctl_conf_ptr->switch_type);
2851 	xfree (ctl_conf_ptr->task_epilog);
2852 	xfree (ctl_conf_ptr->task_plugin);
2853 	xfree (ctl_conf_ptr->task_prolog);
2854 	xfree (ctl_conf_ptr->tmp_fs);
2855 	xfree (ctl_conf_ptr->topology_param);
2856 	xfree (ctl_conf_ptr->topology_plugin);
2857 	xfree (ctl_conf_ptr->unkillable_program);
2858 	xfree (ctl_conf_ptr->version);
2859 	xfree (ctl_conf_ptr->x11_params);
2860 
2861 	if (purge_node_hash)
2862 		_free_name_hashtbl();
2863 }
2864 
2865 /*
2866  * init_slurm_conf - initialize or re-initialize the slurm configuration
2867  *	values to defaults (NULL or NO_VAL). Note that the configuration
2868  *	file pathname (slurm_conf) is not changed.
2869  * IN/OUT ctl_conf_ptr - pointer to data structure to be initialized
2870  */
2871 void
init_slurm_conf(slurm_ctl_conf_t * ctl_conf_ptr)2872 init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
2873 {
2874 	int i;
2875 
2876 	ctl_conf_ptr->last_update		= time(NULL);
2877 	xfree (ctl_conf_ptr->accounting_storage_backup_host);
2878 	ctl_conf_ptr->accounting_storage_enforce          = 0;
2879 	xfree (ctl_conf_ptr->accounting_storage_ext_host);
2880 	xfree (ctl_conf_ptr->accounting_storage_host);
2881 	xfree (ctl_conf_ptr->accounting_storage_loc);
2882 	xfree (ctl_conf_ptr->accounting_storage_pass);
2883 	ctl_conf_ptr->accounting_storage_port             = 0;
2884 	xfree (ctl_conf_ptr->accounting_storage_tres);
2885 	xfree (ctl_conf_ptr->accounting_storage_type);
2886 	xfree (ctl_conf_ptr->accounting_storage_user);
2887 	xfree(ctl_conf_ptr->authalttypes);
2888 	xfree (ctl_conf_ptr->authinfo);
2889 	xfree (ctl_conf_ptr->authtype);
2890 	ctl_conf_ptr->batch_start_timeout	= 0;
2891 	xfree (ctl_conf_ptr->bb_type);
2892 	xfree(ctl_conf_ptr->cli_filter_plugins);
2893 	xfree (ctl_conf_ptr->cluster_name);
2894 	xfree (ctl_conf_ptr->comm_params);
2895 	ctl_conf_ptr->complete_wait		= NO_VAL16;
2896 	ctl_conf_ptr->conf_flags                = 0;
2897 	for (i = 0; i < ctl_conf_ptr->control_cnt; i++) {
2898 		xfree(ctl_conf_ptr->control_addr[i]);
2899 		xfree(ctl_conf_ptr->control_machine[i]);
2900 	}
2901 	ctl_conf_ptr->control_cnt = 0;
2902 	xfree (ctl_conf_ptr->control_addr);
2903 	xfree (ctl_conf_ptr->control_machine);
2904 	xfree (ctl_conf_ptr->core_spec_plugin);
2905 	xfree (ctl_conf_ptr->cred_type);
2906 	ctl_conf_ptr->def_mem_per_cpu           = 0;
2907 	ctl_conf_ptr->debug_flags		= 0;
2908 	xfree (ctl_conf_ptr->dependency_params);
2909 	ctl_conf_ptr->acct_gather_node_freq	= 0;
2910 	xfree (ctl_conf_ptr->acct_gather_energy_type);
2911 	xfree (ctl_conf_ptr->acct_gather_profile_type);
2912 	xfree (ctl_conf_ptr->acct_gather_interconnect_type);
2913 	xfree (ctl_conf_ptr->acct_gather_filesystem_type);
2914 	ctl_conf_ptr->ext_sensors_freq		= 0;
2915 	xfree (ctl_conf_ptr->ext_sensors_type);
2916 	ctl_conf_ptr->enforce_part_limits       = 0;
2917 	xfree (ctl_conf_ptr->epilog);
2918 	ctl_conf_ptr->epilog_msg_time		= NO_VAL;
2919 	xfree(ctl_conf_ptr->fed_params);
2920 	ctl_conf_ptr->first_job_id		= NO_VAL;
2921 	ctl_conf_ptr->get_env_timeout		= 0;
2922 	xfree(ctl_conf_ptr->gres_plugins);
2923 	ctl_conf_ptr->group_time		= 0;
2924 	ctl_conf_ptr->group_force		= 0;
2925 	ctl_conf_ptr->hash_val			= NO_VAL;
2926 	ctl_conf_ptr->health_check_interval	= 0;
2927 	xfree(ctl_conf_ptr->health_check_program);
2928 	ctl_conf_ptr->inactive_limit		= NO_VAL16;
2929 	xfree (ctl_conf_ptr->job_acct_gather_freq);
2930 	xfree (ctl_conf_ptr->job_acct_gather_type);
2931 	xfree (ctl_conf_ptr->job_acct_gather_params);
2932 	xfree (ctl_conf_ptr->job_comp_loc);
2933 	xfree(ctl_conf_ptr->job_comp_params);
2934 	xfree (ctl_conf_ptr->job_comp_pass);
2935 	ctl_conf_ptr->job_comp_port             = 0;
2936 	xfree (ctl_conf_ptr->job_comp_type);
2937 	xfree (ctl_conf_ptr->job_comp_user);
2938 	xfree (ctl_conf_ptr->job_container_plugin);
2939 	xfree (ctl_conf_ptr->job_credential_private_key);
2940 	xfree (ctl_conf_ptr->job_credential_public_certificate);
2941 	FREE_NULL_LIST(ctl_conf_ptr->job_defaults_list);
2942 	ctl_conf_ptr->job_file_append		= NO_VAL16;
2943 	ctl_conf_ptr->job_requeue		= NO_VAL16;
2944 	xfree(ctl_conf_ptr->job_submit_plugins);
2945 	ctl_conf_ptr->keep_alive_time		= NO_VAL16;
2946 	ctl_conf_ptr->kill_on_bad_exit		= 0;
2947 	ctl_conf_ptr->kill_wait			= NO_VAL16;
2948 	xfree (ctl_conf_ptr->launch_params);
2949 	xfree (ctl_conf_ptr->launch_type);
2950 	xfree (ctl_conf_ptr->layouts);
2951 	xfree (ctl_conf_ptr->licenses);
2952 	xfree (ctl_conf_ptr->mail_domain);
2953 	xfree (ctl_conf_ptr->mail_prog);
2954 	ctl_conf_ptr->max_array_sz		= NO_VAL;
2955 	ctl_conf_ptr->max_dbd_msgs		= 0;
2956 	ctl_conf_ptr->max_job_cnt		= NO_VAL;
2957 	ctl_conf_ptr->max_job_id		= NO_VAL;
2958 	ctl_conf_ptr->max_mem_per_cpu           = 0;
2959 	ctl_conf_ptr->max_step_cnt		= NO_VAL;
2960 	xfree(ctl_conf_ptr->mcs_plugin);
2961 	xfree(ctl_conf_ptr->mcs_plugin_params);
2962 	ctl_conf_ptr->job_acct_oom_kill         = false;
2963 	ctl_conf_ptr->min_job_age = NO_VAL;
2964 	xfree (ctl_conf_ptr->mpi_default);
2965 	xfree (ctl_conf_ptr->mpi_params);
2966 	xfree (ctl_conf_ptr->msg_aggr_params);
2967 	ctl_conf_ptr->msg_timeout		= NO_VAL16;
2968 	ctl_conf_ptr->next_job_id		= NO_VAL;
2969 	xfree(ctl_conf_ptr->node_features_plugins);
2970 	xfree (ctl_conf_ptr->node_prefix);
2971 	ctl_conf_ptr->over_time_limit           = 0;
2972 	xfree (ctl_conf_ptr->plugindir);
2973 	xfree (ctl_conf_ptr->plugstack);
2974 	xfree (ctl_conf_ptr->power_parameters);
2975 	xfree (ctl_conf_ptr->power_plugin);
2976 	ctl_conf_ptr->preempt_mode              = 0;
2977 	xfree (ctl_conf_ptr->preempt_type);
2978 	xfree (ctl_conf_ptr->priority_params);
2979 	xfree (ctl_conf_ptr->priority_type);
2980 	xfree (ctl_conf_ptr->priority_weight_tres);
2981 	ctl_conf_ptr->private_data              = 0;
2982 	xfree (ctl_conf_ptr->proctrack_type);
2983 	xfree (ctl_conf_ptr->prolog);
2984 	ctl_conf_ptr->prolog_flags				= 0;
2985 	ctl_conf_ptr->propagate_prio_process	= NO_VAL16;
2986 	xfree (ctl_conf_ptr->propagate_rlimits);
2987 	xfree (ctl_conf_ptr->propagate_rlimits_except);
2988 	xfree (ctl_conf_ptr->reboot_program);
2989 	ctl_conf_ptr->reconfig_flags		= 0;
2990 	xfree(ctl_conf_ptr->requeue_exit);
2991 	xfree(ctl_conf_ptr->requeue_exit_hold);
2992 	ctl_conf_ptr->resume_timeout		= 0;
2993 	xfree (ctl_conf_ptr->resume_fail_program);
2994 	xfree (ctl_conf_ptr->resume_program);
2995 	ctl_conf_ptr->resume_rate		= NO_VAL16;
2996 	xfree (ctl_conf_ptr->resv_epilog);
2997 	ctl_conf_ptr->resv_over_run		= 0;
2998 	xfree (ctl_conf_ptr->resv_prolog);
2999 	ctl_conf_ptr->ret2service		= NO_VAL16;
3000 	xfree (ctl_conf_ptr->route_plugin);
3001 	xfree( ctl_conf_ptr->salloc_default_command);
3002 	xfree( ctl_conf_ptr->sbcast_parameters);
3003 	xfree( ctl_conf_ptr->sched_params );
3004 	ctl_conf_ptr->sched_time_slice		= NO_VAL16;
3005 	xfree( ctl_conf_ptr->schedtype );
3006 	xfree( ctl_conf_ptr->select_type );
3007 	ctl_conf_ptr->select_type_param         = NO_VAL16;
3008 	ctl_conf_ptr->slurm_user_id		= NO_VAL16;
3009 	xfree (ctl_conf_ptr->slurm_user_name);
3010 	ctl_conf_ptr->slurmd_user_id		= NO_VAL16;
3011 	xfree (ctl_conf_ptr->slurmd_user_name);
3012 	ctl_conf_ptr->slurmctld_debug		= NO_VAL16;
3013 	xfree (ctl_conf_ptr->slurmctld_logfile);
3014 	ctl_conf_ptr->slurmctld_syslog_debug    = NO_VAL16;
3015 	xfree (ctl_conf_ptr->sched_logfile);
3016 	ctl_conf_ptr->sched_log_level		= NO_VAL16;
3017 	xfree (ctl_conf_ptr->slurmctld_addr);
3018 	xfree (ctl_conf_ptr->slurmctld_pidfile);
3019 	xfree (ctl_conf_ptr->slurmctld_plugstack);
3020 	ctl_conf_ptr->slurmctld_port		= NO_VAL;
3021 	ctl_conf_ptr->slurmctld_port_count	= 1;
3022 	xfree (ctl_conf_ptr->slurmctld_primary_off_prog);
3023 	xfree (ctl_conf_ptr->slurmctld_primary_on_prog);
3024 	ctl_conf_ptr->slurmctld_timeout		= NO_VAL16;
3025 	xfree (ctl_conf_ptr->slurmctld_params);
3026 	ctl_conf_ptr->slurmd_debug		= NO_VAL16;
3027 	xfree (ctl_conf_ptr->slurmd_logfile);
3028 	xfree (ctl_conf_ptr->slurmd_params);
3029 	ctl_conf_ptr->slurmd_syslog_debug       = NO_VAL16;
3030 	xfree (ctl_conf_ptr->slurmd_pidfile);
3031  	ctl_conf_ptr->slurmd_port		= NO_VAL;
3032 	xfree (ctl_conf_ptr->slurmd_spooldir);
3033 	ctl_conf_ptr->slurmd_timeout		= NO_VAL16;
3034 	xfree (ctl_conf_ptr->srun_prolog);
3035 	xfree (ctl_conf_ptr->srun_epilog);
3036 	xfree (ctl_conf_ptr->state_save_location);
3037 	xfree (ctl_conf_ptr->suspend_exc_nodes);
3038 	xfree (ctl_conf_ptr->suspend_exc_parts);
3039 	xfree (ctl_conf_ptr->suspend_program);
3040 	ctl_conf_ptr->suspend_rate		= NO_VAL16;
3041 	ctl_conf_ptr->suspend_time		= NO_VAL16;
3042 	ctl_conf_ptr->suspend_timeout		= 0;
3043 	xfree (ctl_conf_ptr->switch_type);
3044 	xfree (ctl_conf_ptr->task_epilog);
3045 	xfree (ctl_conf_ptr->task_plugin);
3046 	ctl_conf_ptr->task_plugin_param		= 0;
3047 	xfree (ctl_conf_ptr->task_prolog);
3048 	ctl_conf_ptr->tcp_timeout		= NO_VAL16;
3049 	xfree (ctl_conf_ptr->tmp_fs);
3050 	xfree (ctl_conf_ptr->topology_param);
3051 	xfree (ctl_conf_ptr->topology_plugin);
3052 	ctl_conf_ptr->tree_width       		= NO_VAL16;
3053 	xfree (ctl_conf_ptr->unkillable_program);
3054 	ctl_conf_ptr->unkillable_timeout        = NO_VAL16;
3055 	ctl_conf_ptr->vsize_factor              = 0;
3056 	ctl_conf_ptr->wait_time			= NO_VAL16;
3057 	xfree (ctl_conf_ptr->x11_params);
3058 	ctl_conf_ptr->prolog_epilog_timeout = NO_VAL16;
3059 
3060 	_free_name_hashtbl();
3061 	_init_name_hashtbl();
3062 
3063 	return;
3064 }
3065 
3066 /* caller must lock conf_lock */
_init_slurm_conf(const char * file_name)3067 static int _init_slurm_conf(const char *file_name)
3068 {
3069 	char *name = (char *)file_name;
3070 	int rc = SLURM_SUCCESS;
3071 
3072 	if (name == NULL) {
3073 		name = getenv("SLURM_CONF");
3074 		if (name == NULL)
3075 			name = default_slurm_config_file;
3076 	}
3077 	if (conf_initialized)
3078 		error("the conf_hashtbl is already inited");
3079 	debug("Reading slurm.conf file: %s", name);
3080 	conf_hashtbl = s_p_hashtbl_create(slurm_conf_options);
3081 	conf_ptr->last_update = time(NULL);
3082 
3083 	/* init hash to 0 */
3084 	conf_ptr->hash_val = 0;
3085 	rc = s_p_parse_file(conf_hashtbl, &conf_ptr->hash_val, name, false);
3086 	/* s_p_dump_values(conf_hashtbl, slurm_conf_options); */
3087 
3088 	if (_validate_and_set_defaults(conf_ptr, conf_hashtbl) == SLURM_ERROR)
3089 		rc = SLURM_ERROR;
3090 	conf_ptr->slurm_conf = xstrdup(name);
3091 
3092 	no_addr_cache = false;
3093 	if (xstrcasestr("NoAddrCache", conf_ptr->comm_params))
3094 		no_addr_cache = true;
3095 
3096 	conf_initialized = true;
3097 
3098 	return rc;
3099 }
3100 
3101 /* caller must lock conf_lock */
3102 static void
_destroy_slurm_conf(void)3103 _destroy_slurm_conf(void)
3104 {
3105 	if (plugstack_conf) {
3106 		xfree(plugstack_conf);
3107 		close(plugstack_fd);
3108 	}
3109 
3110 	if (topology_conf) {
3111 		xfree(topology_conf);
3112 		close(topology_fd);
3113 	}
3114 
3115 	s_p_hashtbl_destroy(conf_hashtbl);
3116 	if (default_frontend_tbl != NULL) {
3117 		s_p_hashtbl_destroy(default_frontend_tbl);
3118 		default_frontend_tbl = NULL;
3119 	}
3120 	if (default_nodename_tbl != NULL) {
3121 		s_p_hashtbl_destroy(default_nodename_tbl);
3122 		default_nodename_tbl = NULL;
3123 	}
3124 	if (default_partition_tbl != NULL) {
3125 		s_p_hashtbl_destroy(default_partition_tbl);
3126 		default_partition_tbl = NULL;
3127 	}
3128 	free_slurm_conf(conf_ptr, true);
3129 	conf_initialized = false;
3130 
3131 	/* xfree(conf_ptr); */
3132 }
3133 
3134 /*
3135  * Precedence order for user commands:
3136  *
3137  * 1. direct file
3138  *   a. argument if not NULL
3139  *   b. SLURM_CONF if not NULL
3140  *   c. default_slurm_config_file if it exists.
3141  *   d. /run/slurm/conf/slurm.conf if it exists.
3142  * 2. SLURM_CONF_SERVER env var (not documented, meant for testing only)
3143  * 3. DNS SRV record
3144  */
_establish_config_source(char ** config_file,int * memfd)3145 static int _establish_config_source(char **config_file, int *memfd)
3146 {
3147 	struct stat stat_buf;
3148 	config_response_msg_t *config = NULL;
3149 
3150 	/*
3151 	 * If config_file was defined (e.g., through the -f option to slurmd)
3152 	 * or the SLURM_CONF variable is set we will always respect those,
3153 	 * and leave s_p_parse_file() to see if it can actually load the file.
3154 	 */
3155 	if (*config_file) {
3156 		debug2("%s: using config_file=%s (provided)",
3157 		       __func__, *config_file);
3158 		return SLURM_SUCCESS;
3159 	}
3160 	if ((*config_file = xstrdup(getenv("SLURM_CONF")))) {
3161 		debug("%s: using config_file=%s (environment)",
3162 		      __func__, *config_file);
3163 		return SLURM_SUCCESS;
3164 	}
3165 
3166 	/*
3167 	 * Use default_slurm_config_file iff the file exists.
3168 	 * This is needed so the "configless" user commands do not get stuck
3169 	 * attempting to load a non-existent config file for an entire
3170 	 * minute in s_p_parse_file(), and we can fall back to our other
3171 	 * options.
3172 	 */
3173 	if (!stat(default_slurm_config_file, &stat_buf)) {
3174 		*config_file = xstrdup(default_slurm_config_file);
3175 		debug2("%s: using config_file=%s (default)",
3176 		       __func__, *config_file);
3177 		return SLURM_SUCCESS;
3178 	}
3179 
3180 	/*
3181 	 * Check /run for a usable symlink. This will only exist if slurmd
3182 	 * is running in configless mode.
3183 	 */
3184 	if (!stat("/run/slurm/conf/slurm.conf", &stat_buf)) {
3185 		*config_file = xstrdup("/run/slurm/conf/slurm.conf");
3186 		debug2("%s: using config_file=%s (cached)",
3187 		       __func__, *config_file);
3188 		return SLURM_SUCCESS;
3189 	}
3190 
3191 	/*
3192 	 * One last shot - try the SLURM_CONF_SERVER envvar or DNS SRV
3193 	 * entries to fetch the configs from the slurmctld.
3194 	 */
3195 	if (!(config = fetch_config(NULL, CONFIG_REQUEST_SLURM_CONF)) ||
3196 	    !config->config) {
3197 		error("%s: failed to fetch config", __func__);
3198 		return SLURM_ERROR;
3199 	}
3200 
3201 	/*
3202 	 * memfd is always created successfully as any failure causes the
3203 	 * process to die with a fatal() error.
3204 	 */
3205 	*memfd = dump_to_memfd("slurm.conf", config->config, config_file);
3206 	/*
3207 	 * If we've been handed a plugstack.conf or topology.conf file then
3208 	 * slurmctld thinks we'll need it. Stash it in case of an eventual
3209 	 * spank_stack_init() / slurm_topo_init().
3210 	 */
3211 	if (config->plugstack_config)
3212 		plugstack_fd = dump_to_memfd("plugstack.conf",
3213 					     config->plugstack_config,
3214 					     &plugstack_conf);
3215 	if (config->topology_config)
3216 		topology_fd = dump_to_memfd("topology.conf",
3217 					    config->topology_config,
3218 					    &topology_conf);
3219 	slurm_free_config_response_msg(config);
3220 	debug2("%s: using config_file=%s (fetched)", __func__, *config_file);
3221 
3222 	return SLURM_SUCCESS;
3223 }
3224 
3225 /*
3226  * slurm_conf_init - load the slurm configuration from the a file.
3227  * IN file_name - name of the slurm configuration file to be read
3228  *	If file_name is NULL, then this routine tries to use
3229  *	the value in the SLURM_CONF env variable.  Failing that,
3230  *	it uses the compiled-in default file name.
3231  *	If the conf structures have already been initialized by a call to
3232  *	slurm_conf_init, any subsequent calls will do nothing until
3233  *	slurm_conf_destroy is called.
3234  * RET SLURM_SUCCESS if conf file is initialized.  If the slurm conf
3235  *       was already initialied, return SLURM_ERROR.
3236  */
3237 extern int
slurm_conf_init(const char * file_name)3238 slurm_conf_init(const char *file_name)
3239 {
3240 	char *config_file;
3241 	int memfd = -1;
3242 	slurm_mutex_lock(&conf_lock);
3243 
3244 	if (conf_initialized) {
3245 		slurm_mutex_unlock(&conf_lock);
3246 		return SLURM_ERROR;
3247 	}
3248 
3249 	config_file = xstrdup(file_name);
3250 	if (_establish_config_source(&config_file, &memfd)) {
3251 		log_var(lvl, "Could not establish a configuration source");
3252 		xfree(config_file);
3253 		return SLURM_ERROR;
3254 	}
3255 	debug("%s: using config_file=%s", __func__, config_file);
3256 
3257 	/*
3258 	 * Ensure this determination is propagated throughout. A number of
3259 	 * other internal functions will call getenv("SLURM_CONF") rather
3260 	 * than use slurmctld_conf.slurm_conf, and we want to ensure they
3261 	 * don't need to make similar decisions on where the configs live.
3262 	 */
3263 	setenv("SLURM_CONF", config_file, 1);
3264 
3265 #ifndef NDEBUG
3266 	/*
3267 	 * This is done here to ensure all user commands parse this once at
3268 	 * launch, rather than trying to test this during each RPC call.
3269 	 * This environment variable is undocumented, and only
3270 	 * respected in development builds. When set, the remote end
3271 	 * will treat the request as if it was issued by an unprivileged
3272 	 * user account rather than the (likely elevated) privileges that
3273 	 * the account usually operates under. This makes it possible to
3274 	 * test various access controls while running the testsuite under
3275 	 * a single user account.
3276 	 */
3277 	if (getenv("SLURM_TESTSUITE_DROP_PRIV"))
3278 		drop_priv_flag = SLURM_DROP_PRIV;
3279 #endif
3280 
3281 	init_slurm_conf(conf_ptr);
3282 	if (_init_slurm_conf(config_file) != SLURM_SUCCESS) {
3283 		log_var(lvl, "Unable to process configuration file");
3284 		local_test_config_rc = 1;
3285 	}
3286 
3287 	if (memfd != -1) {
3288 		unsetenv("SLURM_CONF");
3289 		close(memfd);
3290 	}
3291 	slurm_mutex_unlock(&conf_lock);
3292 	xfree(config_file);
3293 	return SLURM_SUCCESS;
3294 }
3295 
_internal_reinit(const char * file_name)3296 static int _internal_reinit(const char *file_name)
3297 {
3298 	char *name = (char *)file_name;
3299 	int rc = SLURM_SUCCESS;
3300 
3301 	if (name == NULL) {
3302 		name = getenv("SLURM_CONF");
3303 		if (name == NULL)
3304 			name = default_slurm_config_file;
3305 	}
3306 
3307 	if (conf_initialized) {
3308 		/* could check modified time on slurm.conf here */
3309 		_destroy_slurm_conf();
3310 	}
3311 
3312 	if (_init_slurm_conf(name) != SLURM_SUCCESS) {
3313 		log_var(lvl, "Unable to process configuration file");
3314 		local_test_config_rc = 1;
3315 	}
3316 
3317 
3318 	return rc;
3319 }
3320 
3321 /*
3322  * slurm_conf_reinit - reload the slurm configuration from a file.
3323  * IN file_name - name of the slurm configuration file to be read
3324  *	If file_name is NULL, then this routine tries to use
3325  *	the value in the SLURM_CONF env variable.  Failing that,
3326  *	it uses the compiled-in default file name.
3327  *	Unlike slurm_conf_init, slurm_conf_reinit will always reread the
3328  *	file and reinitialize the configuration structures.
3329  * RET SLURM_SUCCESS if conf file is reinitialized, otherwise SLURM_ERROR.
3330  */
3331 extern int
slurm_conf_reinit(const char * file_name)3332 slurm_conf_reinit(const char *file_name)
3333 {
3334 	int rc;
3335 
3336 	slurm_mutex_lock(&conf_lock);
3337 	rc = _internal_reinit(file_name);
3338 	slurm_mutex_unlock(&conf_lock);
3339 
3340 	return rc;
3341 }
3342 
3343 extern void
slurm_conf_mutex_init(void)3344 slurm_conf_mutex_init(void)
3345 {
3346 	slurm_mutex_init(&conf_lock);
3347 }
3348 
3349 extern void
slurm_conf_install_fork_handlers(void)3350 slurm_conf_install_fork_handlers(void)
3351 {
3352 	int err;
3353 	if ((err = pthread_atfork(NULL, NULL, &slurm_conf_mutex_init)))
3354 		fatal("can't install slurm_conf atfork handler");
3355 	return;
3356 }
3357 
3358 extern int
slurm_conf_destroy(void)3359 slurm_conf_destroy(void)
3360 {
3361 	slurm_mutex_lock(&conf_lock);
3362 
3363 	if (!conf_initialized) {
3364 		slurm_mutex_unlock(&conf_lock);
3365 		return SLURM_SUCCESS;
3366 	}
3367 
3368 	_destroy_slurm_conf();
3369 
3370 	slurm_mutex_unlock(&conf_lock);
3371 
3372 	return SLURM_SUCCESS;
3373 }
3374 
3375 extern slurm_ctl_conf_t *
slurm_conf_lock(void)3376 slurm_conf_lock(void)
3377 {
3378 	int i;
3379 
3380 	slurm_mutex_lock(&conf_lock);
3381 	if (!conf_initialized) {
3382 		if (_init_slurm_conf(NULL) != SLURM_SUCCESS) {
3383 			/*
3384 			 * Clearing control_addr array entries results in
3385 			 * error for most APIs without generating a fatal
3386 			 * error and exiting. Slurm commands and daemons
3387 			 * should call slurm_conf_init() to get a fatal
3388 			 * error instead.
3389 			 */
3390 			for (i = 0; i < conf_ptr->control_cnt; i++)
3391 				xfree(conf_ptr->control_addr[i]);
3392 			xfree(conf_ptr->control_addr);
3393 			conf_ptr->control_cnt = 0;
3394 		}
3395 	}
3396 
3397 	return conf_ptr;
3398 }
3399 
3400 extern void
slurm_conf_unlock(void)3401 slurm_conf_unlock(void)
3402 {
3403 	slurm_mutex_unlock(&conf_lock);
3404 }
3405 
3406 /* Normalize supplied debug level to be in range per log.h definitions */
_normalize_debug_level(uint16_t * level)3407 static void _normalize_debug_level(uint16_t *level)
3408 {
3409 	if (*level > LOG_LEVEL_END) {
3410 		error("Normalizing debug level from %u to %d",
3411 		      *level, (LOG_LEVEL_END - 1));
3412 		*level = (LOG_LEVEL_END - 1);
3413 	}
3414 	/* level is uint16, always > LOG_LEVEL_QUIET(0), can't underflow */
3415 }
3416 
3417 /* Convert HealthCheckNodeState string to numeric value */
_health_node_state(char * state_str)3418 static uint16_t _health_node_state(char *state_str)
3419 {
3420 	uint16_t state_num = 0;
3421 	char *tmp_str = xstrdup(state_str);
3422 	char *token, *last = NULL;
3423 	bool state_set = false;
3424 
3425 	token = strtok_r(tmp_str, ",", &last);
3426 	while (token) {
3427 		if (!xstrcasecmp(token, "ANY")) {
3428 			state_num |= HEALTH_CHECK_NODE_ANY;
3429 			state_set = true;
3430 		} else if (!xstrcasecmp(token, "ALLOC")) {
3431 			state_num |= HEALTH_CHECK_NODE_ALLOC;
3432 			state_set = true;
3433 		} else if (!xstrcasecmp(token, "CYCLE")) {
3434 			state_num |= HEALTH_CHECK_CYCLE;
3435 		} else if (!xstrcasecmp(token, "IDLE")) {
3436 			state_num |= HEALTH_CHECK_NODE_IDLE;
3437 			state_set = true;
3438 		} else if (!xstrcasecmp(token, "MIXED")) {
3439 			state_num |= HEALTH_CHECK_NODE_MIXED;
3440 			state_set = true;
3441 		} else {
3442 			error("Invalid HealthCheckNodeState value %s ignored",
3443 			      token);
3444 		}
3445 		token = strtok_r(NULL, ",", &last);
3446 	}
3447 	if (!state_set)
3448 		state_num |= HEALTH_CHECK_NODE_ANY;
3449 	xfree(tmp_str);
3450 
3451 	return state_num;
3452 }
3453 
/* Return TRUE if a comma-delimited token "hbm" is found */
static bool _have_hbm_token(char *gres_plugins)
{
	bool found = false;
	char *copy, *tok, *save = NULL;

	if (gres_plugins == NULL)
		return false;

	/* Work on a copy since strtok_r() mutates its input */
	copy = xstrdup(gres_plugins);
	for (tok = strtok_r(copy, ",", &save); tok && !found;
	     tok = strtok_r(NULL, ",", &save)) {
		if (!xstrcasecmp(tok, "hbm"))
			found = true;
	}
	xfree(copy);

	return found;
}
3476 
3477 /*
3478  *
3479  * IN/OUT ctl_conf_ptr - a configuration as loaded by read_slurm_conf_ctl
3480  *
3481  * NOTE: a backup_controller or control_machine of "localhost" are over-written
3482  *	with this machine's name.
3483  * NOTE: if control_addr is NULL, it is over-written by control_machine
3484  */
3485 static int
_validate_and_set_defaults(slurm_ctl_conf_t * conf,s_p_hashtbl_t * hashtbl)3486 _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
3487 {
3488 	char *temp_str = NULL;
3489 	long long_suspend_time;
3490 	bool truth;
3491 	char *default_storage_type = NULL, *default_storage_host = NULL;
3492 	char *default_storage_user = NULL, *default_storage_pass = NULL;
3493 	char *default_storage_loc = NULL;
3494 	uint32_t default_storage_port = 0;
3495 	uint16_t uint16_tmp;
3496 	uint64_t def_cpu_per_gpu = 0, def_mem_per_gpu = 0, tot_prio_weight;
3497 	job_defaults_t *job_defaults;
3498 	int i;
3499 
3500 	if (!s_p_get_uint16(&conf->batch_start_timeout, "BatchStartTimeout",
3501 			    hashtbl))
3502 		conf->batch_start_timeout = DEFAULT_BATCH_START_TIMEOUT;
3503 
3504 	s_p_get_string(&conf->cluster_name, "ClusterName", hashtbl);
3505 	/*
3506 	 * Some databases are case sensitive so we have to make sure
3507 	 * the cluster name is lower case since sacctmgr makes sure
3508 	 * this is the case as well.
3509 	 */
3510 	if (conf->cluster_name && *conf->cluster_name) {
3511 		for (i = 0; conf->cluster_name[i] != '\0'; i++)
3512 			conf->cluster_name[i] =
3513 				(char)tolower((int)conf->cluster_name[i]);
3514 	} else {
3515 		error("ClusterName needs to be specified");
3516 		return SLURM_ERROR;
3517 	}
3518 
3519 	if (!s_p_get_uint16(&conf->complete_wait, "CompleteWait", hashtbl))
3520 		conf->complete_wait = DEFAULT_COMPLETE_WAIT;
3521 
3522 	if (_load_slurmctld_host(conf))
3523 		return SLURM_ERROR;
3524 
3525 	if (!s_p_get_string(&conf->acct_gather_energy_type,
3526 			    "AcctGatherEnergyType", hashtbl))
3527 		conf->acct_gather_energy_type =
3528 			xstrdup(DEFAULT_ACCT_GATHER_ENERGY_TYPE);
3529 
3530 	if (!s_p_get_string(&conf->acct_gather_profile_type,
3531 			    "AcctGatherProfileType", hashtbl))
3532 		conf->acct_gather_profile_type =
3533 			xstrdup(DEFAULT_ACCT_GATHER_PROFILE_TYPE);
3534 
3535 	if (!s_p_get_string(&conf->acct_gather_interconnect_type,
3536 			    "AcctGatherInterconnectType", hashtbl) &&
3537 	    !s_p_get_string(&conf->acct_gather_interconnect_type,
3538 			    "AcctGatherInfinibandType", hashtbl))
3539 		conf->acct_gather_interconnect_type =
3540 			xstrdup(DEFAULT_ACCT_GATHER_INTERCONNECT_TYPE);
3541 	else
3542 		xstrsubstituteall(conf->acct_gather_interconnect_type,
3543 				  "infiniband", "interconnect");
3544 
3545 	if (!s_p_get_string(&conf->acct_gather_filesystem_type,
3546 			   "AcctGatherFilesystemType", hashtbl))
3547 		conf->acct_gather_filesystem_type =
3548 			xstrdup(DEFAULT_ACCT_GATHER_FILESYSTEM_TYPE);
3549 
3550 	if (!s_p_get_uint16(&conf->acct_gather_node_freq,
3551 			    "AcctGatherNodeFreq", hashtbl))
3552 		conf->acct_gather_node_freq = 0;
3553 
3554 	conf->conf_flags = 0;
3555 	if (s_p_get_boolean(&truth, "AllowSpecResourcesUsage", hashtbl)) {
3556 		if (truth)
3557 			conf->conf_flags |= CTL_CONF_ASRU;
3558 	} else if (DEFAULT_ALLOW_SPEC_RESOURCE_USAGE)
3559 		conf->conf_flags |= CTL_CONF_ASRU;
3560 
3561 	(void) s_p_get_string(&default_storage_type, "DefaultStorageType",
3562 			      hashtbl);
3563 	(void) s_p_get_string(&default_storage_host, "DefaultStorageHost",
3564 			      hashtbl);
3565 	(void) s_p_get_string(&default_storage_user, "DefaultStorageUser",
3566 			      hashtbl);
3567 	(void) s_p_get_string(&default_storage_pass, "DefaultStoragePass",
3568 			      hashtbl);
3569 	(void) s_p_get_string(&default_storage_loc,  "DefaultStorageLoc",
3570 			      hashtbl);
3571 	(void) s_p_get_uint32(&default_storage_port, "DefaultStoragePort",
3572 			      hashtbl);
3573 	(void) s_p_get_string(&conf->job_credential_private_key,
3574 			     "JobCredentialPrivateKey", hashtbl);
3575 	(void) s_p_get_string(&conf->job_credential_public_certificate,
3576 			      "JobCredentialPublicCertificate", hashtbl);
3577 
3578 	(void) s_p_get_string(&conf->authalttypes, "AuthAltTypes", hashtbl);
3579 
3580 	(void) s_p_get_string(&conf->authinfo, "AuthInfo", hashtbl);
3581 
3582 	if (!s_p_get_string(&conf->authtype, "AuthType", hashtbl))
3583 		conf->authtype = xstrdup(DEFAULT_AUTH_TYPE);
3584 
3585 	(void) s_p_get_string(&conf->bb_type, "BurstBufferType", hashtbl);
3586 
3587 	if (s_p_get_uint16(&uint16_tmp, "CacheGroups", hashtbl))
3588 		debug("Ignoring obsolete CacheGroups option.");
3589 
3590 	(void) s_p_get_string(&conf->comm_params, "CommunicationParameters",
3591 			      hashtbl);
3592 
3593 	if (!s_p_get_string(&conf->core_spec_plugin, "CoreSpecPlugin",
3594 	    hashtbl)) {
3595 		conf->core_spec_plugin =
3596 			xstrdup(DEFAULT_CORE_SPEC_PLUGIN);
3597 	}
3598 
3599 	if (s_p_get_string(&temp_str, "CheckpointType", hashtbl)) {
3600 		xfree(temp_str);
3601 		debug("Ignoring obsolete CheckpointType option.");
3602 	}
3603 
3604 	(void) s_p_get_string(&conf->cli_filter_plugins, "CliFilterPlugins",
3605 			      hashtbl);
3606 
3607 	if (s_p_get_string(&temp_str, "CpuFreqDef", hashtbl)) {
3608 		if (cpu_freq_verify_def(temp_str, &conf->cpu_freq_def)) {
3609 			error("Ignoring invalid CpuFreqDef: %s", temp_str);
3610 			conf->cpu_freq_def = NO_VAL;
3611 		}
3612 		xfree(temp_str);
3613 	} else {
3614 		conf->cpu_freq_def = NO_VAL;
3615 	}
3616 
3617 	if (s_p_get_string(&temp_str, "CpuFreqGovernors", hashtbl)) {
3618 		if (cpu_freq_verify_govlist(temp_str, &conf->cpu_freq_govs)) {
3619 			error("Ignoring invalid CpuFreqGovernors: %s",
3620 				temp_str);
3621 			conf->cpu_freq_govs = CPU_FREQ_ONDEMAND    |
3622 					      CPU_FREQ_PERFORMANCE |
3623 					      CPU_FREQ_USERSPACE;
3624 		}
3625 		xfree(temp_str);
3626 	} else {
3627 		conf->cpu_freq_govs = CPU_FREQ_ONDEMAND | CPU_FREQ_PERFORMANCE |
3628 				      CPU_FREQ_USERSPACE;
3629 	}
3630 
3631 	if (!s_p_get_string(&conf->cred_type, "CredType", hashtbl)) {
3632 		if (s_p_get_string(&conf->cred_type, "CryptoType", hashtbl)) {
3633 			/* swap crypto/ for cred/ */
3634 			xstrsubstitute(conf->cred_type, "crypto", "cred");
3635 		} else
3636 			 conf->cred_type = xstrdup(DEFAULT_CRED_TYPE);
3637 	}
3638 
3639 	conf->def_mem_per_cpu = 0;
3640 	if (s_p_get_uint64(&conf->def_mem_per_cpu, "DefMemPerCPU", hashtbl))
3641 		conf->def_mem_per_cpu |= MEM_PER_CPU;
3642 	else if (!s_p_get_uint64(&conf->def_mem_per_cpu, "DefMemPerNode",
3643 				 hashtbl))
3644 		conf->def_mem_per_cpu = DEFAULT_MEM_PER_CPU;
3645 
3646 	if (s_p_get_uint64(&def_cpu_per_gpu, "DefCPUPerGPU", hashtbl)) {
3647 		job_defaults = xmalloc(sizeof(job_defaults_t));
3648 		job_defaults->type  = JOB_DEF_CPU_PER_GPU;
3649 		job_defaults->value = def_cpu_per_gpu;
3650 		if (!conf->job_defaults_list) {
3651 			conf->job_defaults_list = list_create(xfree_ptr);
3652 		}
3653 		list_append(conf->job_defaults_list, job_defaults);
3654 	}
3655 
3656 	if (s_p_get_uint64(&def_mem_per_gpu, "DefMemPerGPU", hashtbl)) {
3657 		job_defaults = xmalloc(sizeof(job_defaults_t));
3658 		job_defaults->type  = JOB_DEF_MEM_PER_GPU;
3659 		job_defaults->value = def_mem_per_gpu;
3660 		if (!conf->job_defaults_list) {
3661 			conf->job_defaults_list = list_create(xfree_ptr);
3662 		}
3663 		list_append(conf->job_defaults_list, job_defaults);
3664 	}
3665 
3666 	if (s_p_get_string(&temp_str, "DebugFlags", hashtbl)) {
3667 		if (debug_str2flags(temp_str, &conf->debug_flags)
3668 		    != SLURM_SUCCESS) {
3669 			error("DebugFlags invalid: %s", temp_str);
3670 			return SLURM_ERROR;
3671 		}
3672 		xfree(temp_str);
3673 	} else	/* Default: no DebugFlags */
3674 		conf->debug_flags = 0;
3675 
	(void) s_p_get_string(&conf->dependency_params,
			      "DependencyParameters", hashtbl);

	/* DisableRootJobs is stored as a flag bit in conf_flags */
	if (s_p_get_boolean(&truth, "DisableRootJobs", hashtbl) && truth)
		conf->conf_flags |= CTL_CONF_DRJ;

	if (s_p_get_string(&temp_str,
			   "EnforcePartLimits", hashtbl)) {
		uint16_t enforce_param;
		if (parse_part_enforce_type(temp_str, &enforce_param) < 0) {
			error("Bad EnforcePartLimits: %s", temp_str);
			xfree(temp_str);
			return SLURM_ERROR;
		}
		xfree(temp_str);
		conf->enforce_part_limits = enforce_param;
	} else {
		conf->enforce_part_limits = DEFAULT_ENFORCE_PART_LIMITS;
	}

	(void) s_p_get_string(&conf->epilog, "Epilog", hashtbl);

	if (!s_p_get_uint32(&conf->epilog_msg_time, "EpilogMsgTime", hashtbl))
		conf->epilog_msg_time = DEFAULT_EPILOG_MSG_TIME;

	(void) s_p_get_string(&conf->epilog_slurmctld, "EpilogSlurmctld",
			      hashtbl);

	if (!s_p_get_string(&conf->ext_sensors_type,
			    "ExtSensorsType", hashtbl))
		conf->ext_sensors_type =
			xstrdup(DEFAULT_EXT_SENSORS_TYPE);

	if (!s_p_get_uint16(&conf->ext_sensors_freq,
			    "ExtSensorsFreq", hashtbl))
		conf->ext_sensors_freq = 0;

	if (!s_p_get_uint16(&conf->fs_dampening_factor,
			    "FairShareDampeningFactor", hashtbl))
		conf->fs_dampening_factor = 1;

	/*
	 * FastSchedule was removed; only complain when running in slurmctld
	 * so other daemons parsing the same file do not also abort.
	 */
	if (s_p_get_uint16(&uint16_tmp, "FastSchedule", hashtbl) &&
	    running_in_slurmctld()) {
		if (uint16_tmp == 1)
			error("Ignoring obsolete FastSchedule=1 option. Please remove from your configuration.");
		else if (uint16_tmp == 2)
			fatal("The FastSchedule option has been removed. The FastSchedule=2 functionality is available through the SlurmdParameters=config_overrides option.");
		else
			fatal("The FastSchedule option has been removed. Please update your configuration.");
	}

	(void) s_p_get_string(&conf->fed_params, "FederationParameters",
			      hashtbl);

	if (!s_p_get_uint32(&conf->first_job_id, "FirstJobId", hashtbl))
		conf->first_job_id = DEFAULT_FIRST_JOB_ID;

	(void) s_p_get_string(&conf->gres_plugins, "GresTypes", hashtbl);

	if (!s_p_get_uint16(&conf->group_force, "GroupUpdateForce", hashtbl))
		conf->group_force = DEFAULT_GROUP_FORCE;

	if (!s_p_get_uint16(&conf->group_time, "GroupUpdateTime", hashtbl))
		conf->group_time = DEFAULT_GROUP_TIME;

	if (!s_p_get_string(&conf->gpu_freq_def, "GpuFreqDef", hashtbl))
		conf->gpu_freq_def = xstrdup("high,memory=high");

	if (!s_p_get_uint16(&conf->inactive_limit, "InactiveLimit", hashtbl))
		conf->inactive_limit = DEFAULT_INACTIVE_LIMIT;
3746 
	if (!s_p_get_string(&conf->job_acct_gather_freq,
			    "JobAcctGatherFrequency", hashtbl))
		conf->job_acct_gather_freq =
			xstrdup(DEFAULT_JOB_ACCT_GATHER_FREQ);

	if (!s_p_get_string(&conf->job_acct_gather_type,
			   "JobAcctGatherType", hashtbl))
		conf->job_acct_gather_type =
			xstrdup(DEFAULT_JOB_ACCT_GATHER_TYPE);

	(void) s_p_get_string(&conf->job_acct_gather_params,
			     "JobAcctGatherParams", hashtbl);

	/*
	 * Scan the comma-separated JobAcctGatherParams list for the
	 * OverMemoryKill token; tokenize a copy since strtok_r modifies
	 * its input.
	 */
	conf->job_acct_oom_kill = false;
	if (conf->job_acct_gather_params) {
		char *save_ptr = NULL;
		char *tmp = xstrdup(conf->job_acct_gather_params);
		char *tok = strtok_r(tmp, ",", &save_ptr);

		while (tok) {
			if (xstrcasecmp(tok, "OverMemoryKill") == 0) {
				conf->job_acct_oom_kill = true;
				break;
			}
			tok = strtok_r(NULL, ",", &save_ptr);
		}
		xfree(tmp);
	}

	/* JobCheckpointDir is obsolete but tolerated; MemLimitEnforce is
	 * obsolete and fatal. */
	if (s_p_get_string(&temp_str, "JobCheckpointDir", hashtbl)) {
		xfree(temp_str);
		debug("Ignoring obsolete JobCheckpointDir option.");
	}

	if (s_p_get_string(&temp_str, "MemLimitEnforce", hashtbl)) {
		fatal("Invalid parameter MemLimitEnforce. The option is no longer supported, please use OverMemoryKill instead.");
	}
3784 
	/*
	 * JobComp* settings fall back to the Default* storage values when
	 * not explicitly configured. Plugin type strings have the form
	 * "jobcomp/<name>".
	 */
	if (!s_p_get_string(&conf->job_comp_type, "JobCompType", hashtbl)) {
		if (default_storage_type) {
			if (!xstrcasecmp("slurmdbd", default_storage_type)) {
				error("Can not use the default storage type "
				      "specified for jobcomp since there is "
				      "not slurmdbd type.  We are using %s "
				      "as the type. To disable this message "
				      "set JobCompType in your slurm.conf",
				      DEFAULT_JOB_COMP_TYPE);
				conf->job_comp_type =
					xstrdup(DEFAULT_JOB_COMP_TYPE);
			} else
				conf->job_comp_type =
					xstrdup_printf("jobcomp/%s",
						       default_storage_type);
		} else
			conf->job_comp_type = xstrdup(DEFAULT_JOB_COMP_TYPE);
	}
	if (!s_p_get_string(&conf->job_comp_loc, "JobCompLoc", hashtbl)) {
		if (default_storage_loc)
			conf->job_comp_loc = xstrdup(default_storage_loc);
		else if (!xstrcmp(conf->job_comp_type, "jobcomp/mysql"))
			conf->job_comp_loc = xstrdup(DEFAULT_JOB_COMP_DB);
		else
			conf->job_comp_loc = xstrdup(DEFAULT_JOB_COMP_LOC);
	}

	if (!s_p_get_string(&conf->job_comp_host, "JobCompHost",
			    hashtbl)) {
		if (default_storage_host)
			conf->job_comp_host = xstrdup(default_storage_host);
		else
			conf->job_comp_host = xstrdup(DEFAULT_STORAGE_HOST);
	}
	if (!s_p_get_string(&conf->job_comp_user, "JobCompUser",
			    hashtbl)) {
		if (default_storage_user)
			conf->job_comp_user = xstrdup(default_storage_user);
		else
			conf->job_comp_user = xstrdup(DEFAULT_STORAGE_USER);
	}
	s_p_get_string(&conf->job_comp_params, "JobCompParams", hashtbl);
	/* No default password: job_comp_pass stays NULL unless configured */
	if (!s_p_get_string(&conf->job_comp_pass, "JobCompPass",
			    hashtbl)) {
		if (default_storage_pass)
			conf->job_comp_pass = xstrdup(default_storage_pass);
	}
3832 	if (!s_p_get_uint32(&conf->job_comp_port, "JobCompPort",
3833 			    hashtbl)) {
3834 		if (default_storage_port)
3835 			conf->job_comp_port = default_storage_port;
3836 		else if (!xstrcmp(conf->job_comp_type, "job_comp/mysql"))
3837 			conf->job_comp_port = DEFAULT_MYSQL_PORT;
3838 		else
3839 			conf->job_comp_port = DEFAULT_STORAGE_PORT;
3840 	}
3841 
3842 
	if (!s_p_get_string(&conf->job_container_plugin, "JobContainerType",
	    hashtbl)) {
		conf->job_container_plugin =
			xstrdup(DEFAULT_JOB_CONTAINER_PLUGIN);
	}

	if (!s_p_get_uint16(&conf->job_file_append, "JobFileAppend", hashtbl))
		conf->job_file_append = 0;

	/* JobRequeue is effectively boolean; clamp any value > 1 to 1 */
	if (!s_p_get_uint16(&conf->job_requeue, "JobRequeue", hashtbl))
		conf->job_requeue = 1;
	else if (conf->job_requeue > 1)
		conf->job_requeue = 1;

	(void) s_p_get_string(&conf->job_submit_plugins, "JobSubmitPlugins",
			      hashtbl);

	if (!s_p_get_uint16(&conf->get_env_timeout, "GetEnvTimeout", hashtbl))
		conf->get_env_timeout = DEFAULT_GET_ENV_TIMEOUT;

	(void) s_p_get_uint16(&conf->health_check_interval,
			      "HealthCheckInterval", hashtbl);
	if (s_p_get_string(&temp_str, "HealthCheckNodeState", hashtbl)) {
		conf->health_check_node_state = _health_node_state(temp_str);
		xfree(temp_str);
	} else
		conf->health_check_node_state = HEALTH_CHECK_NODE_ANY;

	(void) s_p_get_string(&conf->health_check_program, "HealthCheckProgram",
			      hashtbl);

	if (!s_p_get_uint16(&conf->keep_alive_time, "KeepAliveTime", hashtbl))
		conf->keep_alive_time = DEFAULT_KEEP_ALIVE_TIME;

	if (!s_p_get_uint16(&conf->kill_on_bad_exit, "KillOnBadExit", hashtbl))
		conf->kill_on_bad_exit = DEFAULT_KILL_ON_BAD_EXIT;

	if (!s_p_get_uint16(&conf->kill_wait, "KillWait", hashtbl))
		conf->kill_wait = DEFAULT_KILL_WAIT;

	(void) s_p_get_string(&conf->launch_params, "LaunchParameters",
			      hashtbl);

	if (!s_p_get_string(&conf->launch_type, "LaunchType", hashtbl))
		conf->launch_type = xstrdup(DEFAULT_LAUNCH_TYPE);

	(void) s_p_get_string(&conf->licenses, "Licenses", hashtbl);

	/*
	 * LogTimeFormat: xstrcasestr does substring matching, so the "_ms"
	 * variants must be tested before their base names.
	 */
	if (s_p_get_string(&temp_str, "LogTimeFormat", hashtbl)) {
		/*
		 * If adding to this please update src/api/config_log.c to do
		 * the reverse translation.
		 */
		if (xstrcasestr(temp_str, "iso8601_ms"))
			conf->log_fmt = LOG_FMT_ISO8601_MS;
		else if (xstrcasestr(temp_str, "iso8601"))
			conf->log_fmt = LOG_FMT_ISO8601;
		else if (xstrcasestr(temp_str, "rfc5424_ms"))
			conf->log_fmt = LOG_FMT_RFC5424_MS;
		else if (xstrcasestr(temp_str, "rfc5424"))
			conf->log_fmt = LOG_FMT_RFC5424;
		else if (xstrcasestr(temp_str, "clock"))
			conf->log_fmt = LOG_FMT_CLOCK;
		else if (xstrcasestr(temp_str, "short"))
			conf->log_fmt = LOG_FMT_SHORT;
		else if (xstrcasestr(temp_str, "thread_id"))
			conf->log_fmt = LOG_FMT_THREAD_ID;
		xfree(temp_str);
	} else
		conf->log_fmt = LOG_FMT_ISO8601_MS;

	(void) s_p_get_string(&conf->mail_domain, "MailDomain", hashtbl);

	/* Use the alternate mail program only when the default is absent
	 * and the alternate exists on this host */
	if (!s_p_get_string(&conf->mail_prog, "MailProg", hashtbl)) {
		struct stat stat_buf;
		if ((stat(DEFAULT_MAIL_PROG,     &stat_buf) == 0) ||
		    (stat(DEFAULT_MAIL_PROG_ALT, &stat_buf) != 0))
			conf->mail_prog = xstrdup(DEFAULT_MAIL_PROG);
		else
			conf->mail_prog = xstrdup(DEFAULT_MAIL_PROG_ALT);
	}

	/* Oversized MaxArraySize only warns; the value is kept as given */
	if (!s_p_get_uint32(&conf->max_array_sz, "MaxArraySize", hashtbl))
		conf->max_array_sz = DEFAULT_MAX_ARRAY_SIZE;
	else if (conf->max_array_sz > 4000001) {
		error("MaxArraySize value (%u) is greater than 4000001",
		      conf->max_array_sz);
	}
3931 
	/* MaxDBDMsgs defaults to 0 here when unset; explicit values below
	 * the minimum are rejected */
	if (!s_p_get_uint32(&conf->max_dbd_msgs, "MaxDBDMsgs", hashtbl))
		conf->max_dbd_msgs = 0;
	else if (conf->max_dbd_msgs < DEFAULT_MAX_DBD_MSGS) {
		error("MaxDBDMsgs value (%u) needs to be greater than %d",
		      conf->max_dbd_msgs, DEFAULT_MAX_DBD_MSGS);
		return SLURM_ERROR;
	}

	if (!s_p_get_uint32(&conf->max_job_cnt, "MaxJobCount", hashtbl))
		conf->max_job_cnt = DEFAULT_MAX_JOB_COUNT;
	else if (conf->max_job_cnt < 1) {
		error("MaxJobCount=%u, No jobs permitted", conf->max_job_cnt);
		return SLURM_ERROR;
	}

	/* MaxJobId is silently clamped to MAX_JOB_ID rather than rejected */
	if (!s_p_get_uint32(&conf->max_job_id, "MaxJobId", hashtbl))
		conf->max_job_id = DEFAULT_MAX_JOB_ID;
	if (conf->max_job_id > MAX_JOB_ID) {
		error("MaxJobId can not exceed MAX_JOB_ID, resetting value");
		conf->max_job_id = MAX_JOB_ID;
	}

	if (conf->first_job_id > conf->max_job_id) {
		error("FirstJobId > MaxJobId");
		return SLURM_ERROR;
	} else {
		uint32_t tmp32 = conf->max_job_id - conf->first_job_id + 1;
		if (conf->max_job_cnt > tmp32) {
			/* Needed for job array support */
			info("Resetting MaxJobCnt from %u to %u "
			     "(MaxJobId - FirstJobId + 1)",
			     conf->max_job_cnt, tmp32);
			conf->max_job_cnt = tmp32;
		}
	}

	/* Like DefMemPerCPU above: the per-CPU form carries MEM_PER_CPU */
	conf->max_mem_per_cpu = 0;
	if (s_p_get_uint64(&conf->max_mem_per_cpu,
			   "MaxMemPerCPU", hashtbl)) {
		conf->max_mem_per_cpu |= MEM_PER_CPU;
	} else if (!s_p_get_uint64(&conf->max_mem_per_cpu,
				 "MaxMemPerNode", hashtbl)) {
		conf->max_mem_per_cpu = DEFAULT_MAX_MEM_PER_CPU;
	}

	if (!s_p_get_uint32(&conf->max_step_cnt, "MaxStepCount", hashtbl))
		conf->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
	else if (conf->max_step_cnt < 1) {
		error("MaxStepCount=%u, No steps permitted",
		      conf->max_step_cnt);
		return SLURM_ERROR;
	}

	if (!s_p_get_uint16(&conf->max_tasks_per_node, "MaxTasksPerNode",
			    hashtbl)) {
		conf->max_tasks_per_node = DEFAULT_MAX_TASKS_PER_NODE;
	}
3989 
3990 	(void) s_p_get_string(&conf->mcs_plugin_params, "MCSParameters",
3991 			      hashtbl);
3992 	if (!s_p_get_string(&conf->mcs_plugin, "MCSPlugin", hashtbl)) {
3993 		conf->mcs_plugin = xstrdup(DEFAULT_MCS_PLUGIN);
3994 		if (conf->mcs_plugin_params) {
3995 			/* no plugin mcs and a mcs plugin param */
3996 			error("MCSParameters=%s used and no MCSPlugin",
3997 				conf->mcs_plugin_params);
3998 			return SLURM_ERROR;
3999 		}
4000 	}
4001 	if (conf->mcs_plugin_params &&
4002 	    !xstrcmp(conf->mcs_plugin, "mcs/none")) {
4003 		/* plugin mcs none and a mcs plugin param */
4004 		info("WARNING: MCSParameters=%s can't be used with"
4005 			"MCSPlugin=mcs/none",
4006 			conf->mcs_plugin_params);
4007 	}
4008 	if (!conf->mcs_plugin_params &&
4009 	    !xstrcmp(conf->mcs_plugin, "mcs/group")) {
4010 		/* plugin mcs/group and no mcs plugin param */
4011 		 error("MCSPlugin is mcs/group and no MCSParameters");
4012 		 return SLURM_ERROR;
4013 	}
4014 
	/* Overly large timeouts only warn; log loudly for root (daemons),
	 * quietly otherwise */
	if (!s_p_get_uint16(&conf->msg_timeout, "MessageTimeout", hashtbl))
		conf->msg_timeout = DEFAULT_MSG_TIMEOUT;
	else if (conf->msg_timeout > 100) {
		if (getuid() == 0) {
			info("WARNING: MessageTimeout is too high for "
				"effective fault-tolerance");
		} else {
			debug("WARNING: MessageTimeout is too high for "
				"effective fault-tolerance");
		}
	}

	/* MinJobAge below 2 is forced up to 2 */
	if (!s_p_get_uint32(&conf->min_job_age, "MinJobAge", hashtbl))
		conf->min_job_age = DEFAULT_MIN_JOB_AGE;
	else if (conf->min_job_age < 2) {
		if (getuid() == 0)
			info("WARNING: MinJobAge must be at least 2");
		else
			debug("WARNING: MinJobAge must be at least 2");
		conf->min_job_age = 2;
	}

	/* MpiDefault=openmpi is obsolete and translated to "none" */
	if (!s_p_get_string(&conf->mpi_default, "MpiDefault", hashtbl))
		conf->mpi_default = xstrdup(DEFAULT_MPI_DEFAULT);
	else if (!xstrcmp(conf->mpi_default, "openmpi")) {
		xfree(conf->mpi_default);
		conf->mpi_default = xstrdup("none");
		if (running_in_slurmctld())
			error("Translating obsolete 'MpiDefault=openmpi' option to 'MpiDefault=none'. Please update your configuration.");
	}

	(void) s_p_get_string(&conf->mpi_params, "MpiParams", hashtbl);
#if defined(HAVE_NATIVE_CRAY)
	/* Cray/Aries builds require an explicit ports= range in MpiParams */
	if (conf->mpi_params == NULL ||
	    strstr(conf->mpi_params, "ports=") == NULL) {
		error("MpiParams=ports= is required on Cray/Aries systems");
		return SLURM_ERROR;
	}
#endif

	(void) s_p_get_string(&conf->msg_aggr_params, "MsgAggregationParams",
			      hashtbl);

	if (s_p_get_boolean((bool *)&truth, "TrackWCKey", hashtbl) && truth)
		conf->conf_flags |= CTL_CONF_WCKEY;
4060 
	if (!s_p_get_string(&conf->accounting_storage_type,
			    "AccountingStorageType", hashtbl)) {
		if (default_storage_type)
			conf->accounting_storage_type =
				xstrdup_printf("accounting_storage/%s",
					       default_storage_type);
		else
			conf->accounting_storage_type =
				xstrdup(DEFAULT_ACCOUNTING_STORAGE_TYPE);
	} else {
		/* Direct MySQL access is only permitted from slurmdbd */
		if (xstrcasestr(conf->accounting_storage_type, "mysql"))
			fatal("AccountingStorageType=accounting_storage/mysql "
			      "only permitted in SlurmDBD.");
	}

	(void) s_p_get_string(&conf->node_features_plugins,
			     "NodeFeaturesPlugins", hashtbl);

	if (xstrstr(conf->node_features_plugins, "knl_") &&
	    !_have_hbm_token(conf->gres_plugins)) {
		/* KNL nodes implicitly add GRES type of "hbm" */
		if (conf->gres_plugins && conf->gres_plugins[0])
			xstrcat(conf->gres_plugins, ",hbm");
		else
			xstrcat(conf->gres_plugins, "hbm");
	}

	/* DEFAULT_ACCOUNTING_TRES is always included in the TRES list */
	if (!s_p_get_string(&conf->accounting_storage_tres,
			    "AccountingStorageTRES", hashtbl))
		conf->accounting_storage_tres =
			xstrdup(DEFAULT_ACCOUNTING_TRES);
	else
		xstrfmtcat(conf->accounting_storage_tres,
			   ",%s", DEFAULT_ACCOUNTING_TRES);

	/*
	 * AccountingStorageEnforce: every higher level also implies
	 * "associations"; "all" sets everything except the nojobs/nosteps
	 * bits, which must be requested explicitly.
	 */
	if (s_p_get_string(&temp_str, "AccountingStorageEnforce", hashtbl)) {
		if (xstrcasestr(temp_str, "1")
		    || xstrcasestr(temp_str, "associations"))
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_ASSOCS;

		if (xstrcasestr(temp_str, "2")
		    || xstrcasestr(temp_str, "limits")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_ASSOCS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_LIMITS;
		}

		if (xstrcasestr(temp_str, "safe")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_ASSOCS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_LIMITS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_SAFE;
		}

		if (xstrcasestr(temp_str, "wckeys")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_ASSOCS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_WCKEYS;
			conf->conf_flags |= CTL_CONF_WCKEY;
		}

		if (xstrcasestr(temp_str, "qos")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_ASSOCS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_QOS;
		}

		if (xstrcasestr(temp_str, "all")) {
			conf->accounting_storage_enforce = 0xffff;
			conf->conf_flags |= CTL_CONF_WCKEY;
			/* If all is used, nojobs and nosteps aren't
			   part of it.  They must be requested as well.
			*/
			conf->accounting_storage_enforce
				&= (~ACCOUNTING_ENFORCE_NO_JOBS);
			conf->accounting_storage_enforce
				&= (~ACCOUNTING_ENFORCE_NO_STEPS);
		}

		/* Everything that "all" doesn't mean should be put here */
		if (xstrcasestr(temp_str, "nojobs")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_NO_JOBS;
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_NO_STEPS;
		}

		if (xstrcasestr(temp_str, "nosteps")) {
			conf->accounting_storage_enforce
				|= ACCOUNTING_ENFORCE_NO_STEPS;
		}

		xfree(temp_str);
	} else
		conf->accounting_storage_enforce = 0;
4162 
	/* if no backup we don't care */
	(void) s_p_get_string(&conf->accounting_storage_backup_host,
			      "AccountingStorageBackupHost", hashtbl);

	s_p_get_string(&conf->accounting_storage_ext_host,
			    "AccountingStorageExternalHost", hashtbl);

	if (!s_p_get_string(&conf->accounting_storage_host,
			    "AccountingStorageHost", hashtbl)) {
		if (default_storage_host)
			conf->accounting_storage_host =
				xstrdup(default_storage_host);
		else
			conf->accounting_storage_host =
				xstrdup(DEFAULT_STORAGE_HOST);
	}

	if (!s_p_get_string(&conf->accounting_storage_loc,
			    "AccountingStorageLoc", hashtbl)) {
		if (default_storage_loc)
			conf->accounting_storage_loc =
				xstrdup(default_storage_loc);
		else if (!xstrcmp(conf->accounting_storage_type,
				 "accounting_storage/mysql"))
			conf->accounting_storage_loc =
				xstrdup(DEFAULT_ACCOUNTING_DB);
		else
			conf->accounting_storage_loc =
				xstrdup(DEFAULT_STORAGE_LOC);
	}
	if (!s_p_get_string(&conf->accounting_storage_user,
			    "AccountingStorageUser", hashtbl)) {
		if (default_storage_user)
			conf->accounting_storage_user =
				xstrdup(default_storage_user);
		else
			conf->accounting_storage_user =
				xstrdup(DEFAULT_STORAGE_USER);
	}
	/* No default password: left NULL unless configured */
	if (!s_p_get_string(&conf->accounting_storage_pass,
			    "AccountingStoragePass", hashtbl)) {
		if (default_storage_pass)
			conf->accounting_storage_pass =
				xstrdup(default_storage_pass);
	}
	/* AccountingStoreJobComment is enabled by default (flag set when
	 * the option is absent or true) */
	if (!s_p_get_boolean(&truth, "AccountingStoreJobComment", hashtbl)
	    || truth)
		conf->conf_flags |= CTL_CONF_SJC;

	if (!s_p_get_uint32(&conf->accounting_storage_port,
			    "AccountingStoragePort", hashtbl)) {
		if (default_storage_port)
			conf->accounting_storage_port = default_storage_port;
		else if (!xstrcmp(conf->accounting_storage_type,
				"accounting_storage/slurmdbd"))
			conf->accounting_storage_port = SLURMDBD_PORT;
		else if (!xstrcmp(conf->accounting_storage_type,
			  "accounting_storage/mysql"))
			conf->accounting_storage_port = DEFAULT_MYSQL_PORT;
		else
			conf->accounting_storage_port = DEFAULT_STORAGE_PORT;
	}

	/* remove the user and loc if using slurmdbd */
	if (!xstrcmp(conf->accounting_storage_type,
		   "accounting_storage/slurmdbd")) {
		xfree(conf->accounting_storage_loc);
		conf->accounting_storage_loc = xstrdup("N/A");
		xfree(conf->accounting_storage_user);
		conf->accounting_storage_user = xstrdup("N/A");
	}
4234 
	(void) s_p_get_uint16(&conf->over_time_limit, "OverTimeLimit", hashtbl);

	/* PluginDir must pass path validation even when defaulted */
	if (!s_p_get_string(&conf->plugindir, "PluginDir", hashtbl))
		conf->plugindir = xstrdup(default_plugin_path);
	if (!_is_valid_path(conf->plugindir, "PluginDir")) {
		error("Bad value \"%s\" for PluginDir", conf->plugindir);
		return SLURM_ERROR;
	}

	s_p_get_string(&conf->plugstack, "PlugStackConfig", hashtbl);

	(void) s_p_get_string(&conf->power_parameters, "PowerParameters",
			      hashtbl);
	if (!s_p_get_string(&conf->power_plugin, "PowerPlugin", hashtbl))
		conf->power_plugin = xstrdup(DEFAULT_POWER_PLUGIN);

	/* PreemptExemptTime: temp_str is freed on both success and error */
	if (s_p_get_string(&temp_str, "PreemptExemptTime", hashtbl)) {
		uint32_t exempt_time = time_str2secs(temp_str);
		if (exempt_time == NO_VAL) {
			error("PreemptExemptTime=%s invalid", temp_str);
			xfree(temp_str);
			return SLURM_ERROR;
		}
		conf->preempt_exempt_time = exempt_time;
		xfree(temp_str);
	}
4261 
4262 	if (s_p_get_string(&temp_str, "PreemptMode", hashtbl)) {
4263 		conf->preempt_mode = preempt_mode_num(temp_str);
4264 		if (conf->preempt_mode == NO_VAL16) {
4265 			error("PreemptMode=%s invalid", temp_str);
4266 			return SLURM_ERROR;
4267 		}
4268 		if (conf->preempt_mode == PREEMPT_MODE_SUSPEND) {
4269 			error("PreemptMode=SUSPEND requires GANG too");
4270 			return SLURM_ERROR;
4271 		}
4272 		xfree(temp_str);
4273 	} else {
4274 		conf->preempt_mode = PREEMPT_MODE_OFF;
4275 	}
4276 	if (!s_p_get_string(&conf->preempt_type, "PreemptType", hashtbl))
4277 		conf->preempt_type = xstrdup(DEFAULT_PREEMPT_TYPE);
4278 	if (xstrcmp(conf->preempt_type, "preempt/qos") == 0) {
4279 		int preempt_mode = conf->preempt_mode & (~PREEMPT_MODE_GANG);
4280 		if (preempt_mode == PREEMPT_MODE_OFF) {
4281 			error("PreemptType and PreemptMode values "
4282 			      "incompatible");
4283 			return SLURM_ERROR;
4284 		}
4285 	} else if (xstrcmp(conf->preempt_type, "preempt/partition_prio") == 0) {
4286 		int preempt_mode = conf->preempt_mode & (~PREEMPT_MODE_GANG);
4287 		if (preempt_mode == PREEMPT_MODE_OFF) {
4288 			error("PreemptType and PreemptMode values "
4289 			      "incompatible");
4290 			return SLURM_ERROR;
4291 		}
4292 	} else if (xstrcmp(conf->preempt_type, "preempt/none") == 0) {
4293 		int preempt_mode = conf->preempt_mode & (~PREEMPT_MODE_GANG);
4294 		if (preempt_mode != PREEMPT_MODE_OFF) {
4295 			error("PreemptType and PreemptMode values "
4296 			      "incompatible");
4297 			return SLURM_ERROR;
4298 		}
4299 	}
4300 
	/* PrEp (PrologEpilog) plugin stack and its parameters */
	(void) s_p_get_string(&conf->prep_params, "PrEpParameters", hashtbl);
	if (!s_p_get_string(&conf->prep_plugins, "PrEpPlugins", hashtbl))
		conf->prep_plugins = xstrdup(DEFAULT_PREP_PLUGINS);
4304 
4305 	if (s_p_get_string(&temp_str, "PriorityDecayHalfLife", hashtbl)) {
4306 		int max_time = time_str2mins(temp_str);
4307 		if ((max_time < 0) && (max_time != INFINITE)) {
4308 			error("Bad value \"%s\" for PriorityDecayHalfLife",
4309 			      temp_str);
4310 			return SLURM_ERROR;
4311 		}
4312 		conf->priority_decay_hl = max_time * 60;
4313 		xfree(temp_str);
4314 	} else
4315 		conf->priority_decay_hl = DEFAULT_PRIORITY_DECAY;
4316 
4317 	if (s_p_get_string(&temp_str, "PriorityCalcPeriod", hashtbl)) {
4318 		int calc_period = time_str2mins(temp_str);
4319 		if (calc_period < 1) {
4320 			error("Bad value \"%s\" for PriorityCalcPeriod",
4321 			      temp_str);
4322 			return SLURM_ERROR;
4323 		}
4324 		conf->priority_calc_period = calc_period * 60;
4325 		xfree(temp_str);
4326 	} else
4327 		conf->priority_calc_period = DEFAULT_PRIORITY_CALC_PERIOD;
4328 
	if (s_p_get_boolean(&truth, "PriorityFavorSmall", hashtbl) && truth)
		conf->priority_favor_small = 1;
	else
		conf->priority_favor_small = 0;

	/* Fair tree is the default; DEPTH_OBLIVIOUS or NO_FAIR_TREE
	 * clears it below */
	conf->priority_flags = PRIORITY_FLAGS_FAIR_TREE;
	if (s_p_get_string(&temp_str, "PriorityFlags", hashtbl)) {
		if (xstrcasestr(temp_str, "ACCRUE_ALWAYS"))
			conf->priority_flags |= PRIORITY_FLAGS_ACCRUE_ALWAYS;
		if (xstrcasestr(temp_str, "SMALL_RELATIVE_TO_TIME"))
			conf->priority_flags |= PRIORITY_FLAGS_SIZE_RELATIVE;
		if (xstrcasestr(temp_str, "CALCULATE_RUNNING"))
			conf->priority_flags |= PRIORITY_FLAGS_CALCULATE_RUNNING;

		if (xstrcasestr(temp_str, "DEPTH_OBLIVIOUS")) {
			conf->priority_flags |= PRIORITY_FLAGS_DEPTH_OBLIVIOUS;
			conf->priority_flags &= ~PRIORITY_FLAGS_FAIR_TREE;
		} else if (xstrcasestr(temp_str, "NO_FAIR_TREE"))
			conf->priority_flags &= ~PRIORITY_FLAGS_FAIR_TREE;

		if (xstrcasestr(temp_str, "INCR_ONLY"))
			conf->priority_flags |= PRIORITY_FLAGS_INCR_ONLY;

		if (xstrcasestr(temp_str, "MAX_TRES"))
			conf->priority_flags |= PRIORITY_FLAGS_MAX_TRES;

		/* NO_NORMAL_ALL is shorthand for all four NO_NORMAL_* bits */
		if (xstrcasestr(temp_str, "NO_NORMAL_ALL"))
			conf->priority_flags |=
				PRIORITY_FLAGS_NO_NORMAL_ASSOC |
				PRIORITY_FLAGS_NO_NORMAL_PART  |
				PRIORITY_FLAGS_NO_NORMAL_QOS   |
				PRIORITY_FLAGS_NO_NORMAL_TRES;
		if (xstrcasestr(temp_str, "NO_NORMAL_ASSOC"))
			conf->priority_flags |= PRIORITY_FLAGS_NO_NORMAL_ASSOC;
		if (xstrcasestr(temp_str, "NO_NORMAL_PART"))
			conf->priority_flags |= PRIORITY_FLAGS_NO_NORMAL_PART;
		if (xstrcasestr(temp_str, "NO_NORMAL_QOS"))
			conf->priority_flags |= PRIORITY_FLAGS_NO_NORMAL_QOS;
		if (xstrcasestr(temp_str, "NO_NORMAL_TRES"))
			conf->priority_flags |= PRIORITY_FLAGS_NO_NORMAL_TRES;

		xfree(temp_str);
	}
4372 
4373 	if (s_p_get_string(&temp_str, "PriorityMaxAge", hashtbl)) {
4374 		int max_time = time_str2mins(temp_str);
4375 		if ((max_time < 0) && (max_time != INFINITE)) {
4376 			error("Bad value \"%s\" for PriorityMaxAge",
4377 			      temp_str);
4378 			return SLURM_ERROR;
4379 		}
4380 		conf->priority_max_age = max_time * 60;
4381 		xfree(temp_str);
4382 	} else
4383 		conf->priority_max_age = DEFAULT_PRIORITY_DECAY;
4384 
4385 	(void) s_p_get_string(&conf->priority_params, "PriorityParameters",
4386 			      hashtbl);
4387 
4388 
4389 	if (s_p_get_string(&temp_str, "PriorityUsageResetPeriod", hashtbl)) {
4390 		if (xstrcasecmp(temp_str, "none") == 0)
4391 			conf->priority_reset_period = PRIORITY_RESET_NONE;
4392 		else if (xstrcasecmp(temp_str, "now") == 0)
4393 			conf->priority_reset_period = PRIORITY_RESET_NOW;
4394 		else if (xstrcasecmp(temp_str, "daily") == 0)
4395 			conf->priority_reset_period = PRIORITY_RESET_DAILY;
4396 		else if (xstrcasecmp(temp_str, "weekly") == 0)
4397 			conf->priority_reset_period = PRIORITY_RESET_WEEKLY;
4398 		else if (xstrcasecmp(temp_str, "monthly") == 0)
4399 			conf->priority_reset_period = PRIORITY_RESET_MONTHLY;
4400 		else if (xstrcasecmp(temp_str, "quarterly") == 0)
4401 			conf->priority_reset_period = PRIORITY_RESET_QUARTERLY;
4402 		else if (xstrcasecmp(temp_str, "yearly") == 0)
4403 			conf->priority_reset_period = PRIORITY_RESET_YEARLY;
4404 		else {
4405 			error("Bad value \"%s\" for PriorityUsageResetPeriod",
4406 			      temp_str);
4407 			return SLURM_ERROR;
4408 		}
4409 		xfree(temp_str);
4410 	} else {
4411 		conf->priority_reset_period = PRIORITY_RESET_NONE;
4412 		if (!conf->priority_decay_hl) {
4413 			error("You have to either have "
4414 			      "PriorityDecayHalfLife != 0 or "
4415 			      "PriorityUsageResetPeriod set to something "
4416 			      "or the priority plugin will result in "
4417 			      "rolling over.");
4418 			return SLURM_ERROR;
4419 		}
4420 	}
4421 
	(void) s_p_get_string(&conf->site_factor_params,
			      "PrioritySiteFactorParameters", hashtbl);

	if (!s_p_get_string(&conf->site_factor_plugin,
			    "PrioritySiteFactorPlugin", hashtbl))
		conf->site_factor_plugin = xstrdup(DEFAULT_SITE_FACTOR_PLUGIN);

	if (!s_p_get_string(&conf->priority_type, "PriorityType", hashtbl))
		conf->priority_type = xstrdup(DEFAULT_PRIORITY_TYPE);

	/* All PriorityWeight* factors default to 0 (factor disabled) */
	if (!s_p_get_uint32(&conf->priority_weight_age,
			    "PriorityWeightAge", hashtbl))
		conf->priority_weight_age = 0;
	if (!s_p_get_uint32(&conf->priority_weight_assoc,
			    "PriorityWeightAssoc", hashtbl))
		conf->priority_weight_assoc = 0;
	if (!s_p_get_uint32(&conf->priority_weight_fs,
			    "PriorityWeightFairshare", hashtbl))
		conf->priority_weight_fs = 0;
	if (!s_p_get_uint32(&conf->priority_weight_js,
			    "PriorityWeightJobSize", hashtbl))
		conf->priority_weight_js = 0;
	if (!s_p_get_uint32(&conf->priority_weight_part,
			    "PriorityWeightPartition", hashtbl))
		conf->priority_weight_part = 0;
	if (!s_p_get_uint32(&conf->priority_weight_qos,
			    "PriorityWeightQOS", hashtbl))
		conf->priority_weight_qos = 0;
	if (!s_p_get_string(&conf->priority_weight_tres, "PriorityWeightTRES",
			    hashtbl))
		conf->priority_weight_tres = NULL;

	/* Check for possible overflow of priority.
	 * We also check when doing the computation for each job. */
	tot_prio_weight = (uint64_t) conf->priority_weight_age   +
		(uint64_t) conf->priority_weight_assoc +
		(uint64_t) conf->priority_weight_fs   +
		(uint64_t) conf->priority_weight_js   +
		(uint64_t) conf->priority_weight_part +
		(uint64_t) conf->priority_weight_qos;
	/* TODO include TRES weights */
	if (tot_prio_weight > 0xffffffff)
		error("PriorityWeight values too high, job priority value may overflow");
4465 
	/* Out of order due to use with ProctrackType */
	if (!s_p_get_string(&conf->switch_type, "SwitchType", hashtbl))
		conf->switch_type = xstrdup(DEFAULT_SWITCH_TYPE);

	if (!s_p_get_string(&conf->proctrack_type, "ProctrackType", hashtbl)) {
		conf->proctrack_type = xstrdup(DEFAULT_PROCTRACK_TYPE);
	}
#ifdef HAVE_NATIVE_CRAY
	if (xstrcmp(conf->proctrack_type, "proctrack/cray_aries")) {
		error("On a Cray/Aries ProctrackType=proctrack/cray_aries "
		      "is required");
		return SLURM_ERROR;
	}
#endif

	conf->private_data = 0; /* Set to default before parsing PrivateData */
	if (s_p_get_string(&temp_str, "PrivateData", hashtbl)) {
		if (xstrcasestr(temp_str, "account"))
			conf->private_data |= PRIVATE_DATA_ACCOUNTS;
		if (xstrcasestr(temp_str, "cloud"))
			conf->private_data |= PRIVATE_CLOUD_NODES;
		if (xstrcasestr(temp_str, "event"))
			conf->private_data |= PRIVATE_DATA_EVENTS;
		if (xstrcasestr(temp_str, "job"))
			conf->private_data |= PRIVATE_DATA_JOBS;
		if (xstrcasestr(temp_str, "node"))
			conf->private_data |= PRIVATE_DATA_NODES;
		if (xstrcasestr(temp_str, "partition"))
			conf->private_data |= PRIVATE_DATA_PARTITIONS;
		if (xstrcasestr(temp_str, "reservation"))
			conf->private_data |= PRIVATE_DATA_RESERVATIONS;
		if (xstrcasestr(temp_str, "usage"))
			conf->private_data |= PRIVATE_DATA_USAGE;
		if (xstrcasestr(temp_str, "user"))
			conf->private_data |= PRIVATE_DATA_USERS;
		/* "all" supersedes any individual bits set above */
		if (xstrcasestr(temp_str, "all"))
			conf->private_data = 0xffff;
		xfree(temp_str);
	}
4505 
	(void) s_p_get_string(&conf->prolog, "Prolog", hashtbl);
	(void) s_p_get_string(&conf->prolog_slurmctld, "PrologSlurmctld",
			      hashtbl);

	if (s_p_get_string(&temp_str, "PrologFlags", hashtbl)) {
		conf->prolog_flags = prolog_str2flags(temp_str);
		if (conf->prolog_flags == NO_VAL16) {
			fatal("PrologFlags invalid: %s", temp_str);
		}

		/* NoHold and Contain are mutually exclusive */
		if ((conf->prolog_flags & PROLOG_FLAG_NOHOLD) &&
		    (conf->prolog_flags & PROLOG_FLAG_CONTAIN)) {
			fatal("PrologFlags invalid combination: NoHold cannot be combined with Contain and/or X11");
		}
		if ((conf->prolog_flags & PROLOG_FLAG_CONTAIN)) {
			/* X11 is incompatible with proctrack/linuxproc */
			if (conf->prolog_flags & PROLOG_FLAG_X11 &&
			    !xstrcmp(conf->proctrack_type,
				     "proctrack/linuxproc"))
				fatal("Invalid combination: PrologFlags=X11 cannot be combined with proctrack/linuxproc");
			/*
			 * proctrack/cray_aries or proctrack/cgroup are
			 * required for pam_slurm_adopt, but don't fatal if
			 * using a different proctrack plugin.
			 */
			if (running_in_slurmctld() &&
			    xstrcmp(conf->proctrack_type, "proctrack/cgroup") &&
			    xstrcmp(conf->proctrack_type,
				    "proctrack/cray_aries"))
				error("If using PrologFlags=Contain for pam_slurm_adopt, either proctrack/cgroup or proctrack/cray_aries is required.  If not using pam_slurm_adopt, please ignore error.");
		}
		/* NoHold implies Alloc */
		if (conf->prolog_flags & PROLOG_FLAG_NOHOLD) {
			conf->prolog_flags |= PROLOG_FLAG_ALLOC;
		}
#ifdef HAVE_FRONT_END
		if (conf->prolog_flags & PROLOG_FLAG_ALLOC) {
			/* Batch job launches will fail without enhancements */
			fatal("PrologFlags=alloc not supported on FrontEnd configurations");
		}
#endif
		xfree(temp_str);
	} else { /* Default: no Prolog Flags are set */
		conf->prolog_flags = 0;
	}
4550 
4551 	if (!s_p_get_uint16(&conf->propagate_prio_process,
4552 			"PropagatePrioProcess", hashtbl)) {
4553 		conf->propagate_prio_process = PROP_PRIO_OFF;
4554 	} else if (conf->propagate_prio_process > PROP_PRIO_NICER) {
4555 		error("Bad PropagatePrioProcess: %u",
4556 			conf->propagate_prio_process);
4557 		return SLURM_ERROR;
4558 	}
4559 
4560 	if (s_p_get_string(&conf->propagate_rlimits_except,
4561 			   "PropagateResourceLimitsExcept", hashtbl)) {
4562 		if ((parse_rlimits(conf->propagate_rlimits_except,
4563 				   NO_PROPAGATE_RLIMITS)) < 0) {
4564 			error("Bad PropagateResourceLimitsExcept: %s",
4565 			      conf->propagate_rlimits_except);
4566 			return SLURM_ERROR;
4567 		}
4568 	} else {
4569 		if (!s_p_get_string(&conf->propagate_rlimits,
4570 				    "PropagateResourceLimits", hashtbl))
4571 			conf->propagate_rlimits = xstrdup( "ALL" );
4572 		if ((parse_rlimits(conf->propagate_rlimits,
4573 				   PROPAGATE_RLIMITS )) < 0) {
4574 			error("Bad PropagateResourceLimits: %s",
4575 			      conf->propagate_rlimits);
4576 			return SLURM_ERROR;
4577 		}
4578 	}
4579 
4580 	if (s_p_get_string(&temp_str, "ReconfigFlags", hashtbl)) {
4581 		conf->reconfig_flags = reconfig_str2flags(temp_str);
4582 		if (conf->reconfig_flags == 0xffff) {
4583 			error("ReconfigFlags invalid: %s", temp_str);
4584 			return SLURM_ERROR;
4585 		}
4586 		xfree(temp_str);
4587 	} else  /* Default: no ReconfigFlags */
4588 		conf->reconfig_flags = 0;
4589 
4590 	if (!s_p_get_uint16(&conf->ret2service, "ReturnToService", hashtbl))
4591 		conf->ret2service = DEFAULT_RETURN_TO_SERVICE;
4592 
4593 	(void) s_p_get_string(&conf->resv_epilog, "ResvEpilog", hashtbl);
4594 	(void) s_p_get_uint16(&conf->resv_over_run, "ResvOverRun", hashtbl);
4595 	(void)s_p_get_string(&conf->resv_prolog, "ResvProlog", hashtbl);
4596 
4597 	(void)s_p_get_string(&conf->resume_fail_program, "ResumeFailProgram",
4598 			     hashtbl);
4599 	(void)s_p_get_string(&conf->resume_program, "ResumeProgram", hashtbl);
4600 	if (!s_p_get_uint16(&conf->resume_rate, "ResumeRate", hashtbl))
4601 		conf->resume_rate = DEFAULT_RESUME_RATE;
4602 	if (!s_p_get_uint16(&conf->resume_timeout, "ResumeTimeout", hashtbl))
4603 		conf->resume_timeout = DEFAULT_RESUME_TIMEOUT;
4604 
4605 	(void) s_p_get_string(&conf->reboot_program, "RebootProgram", hashtbl);
4606 
4607 	if (!s_p_get_string(&conf->route_plugin, "RoutePlugin", hashtbl))
4608 		conf->route_plugin = xstrdup(DEFAULT_ROUTE_PLUGIN);
4609 
4610 	(void) s_p_get_string(&conf->salloc_default_command,
4611 			      "SallocDefaultCommand", hashtbl);
4612 	(void) s_p_get_string(&conf->sbcast_parameters,
4613 			      "SbcastParameters", hashtbl);
4614 
4615 	(void) s_p_get_string(&conf->sched_params, "SchedulerParameters",
4616 			      hashtbl);
4617 
4618 	if (s_p_get_uint16(&uint16_tmp, "SchedulerPort", hashtbl)) {
4619 		debug("Ignoring obsolete SchedulerPort option.");
4620 	}
4621 
4622 	if (s_p_get_uint16(&uint16_tmp, "SchedulerRootFilter", hashtbl)) {
4623 		debug("Ignoring obsolete SchedulerRootFilter option.");
4624 	}
4625 
4626 	if (!s_p_get_uint16(&conf->sched_time_slice, "SchedulerTimeSlice",
4627 	    hashtbl))
4628 		conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE;
4629 	else if (conf->sched_time_slice < 5) {
4630 		error("SchedulerTimeSlice must be at least 5 seconds");
4631 		conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE;
4632 	}
4633 
4634 	if (!s_p_get_string(&conf->schedtype, "SchedulerType", hashtbl))
4635 		conf->schedtype = xstrdup(DEFAULT_SCHEDTYPE);
4636 
4637 	if (!s_p_get_string(&conf->select_type, "SelectType", hashtbl))
4638 		conf->select_type = xstrdup(DEFAULT_SELECT_TYPE);
4639 
4640 	if (s_p_get_string(&temp_str,
4641 			   "SelectTypeParameters", hashtbl)) {
4642 		uint16_t type_param;
4643 		if ((parse_select_type_param(temp_str, &type_param) < 0)) {
4644 			error("Bad SelectTypeParameter: %s", temp_str);
4645 			xfree(temp_str);
4646 			return SLURM_ERROR;
4647 		}
4648 		conf->select_type_param = type_param;
4649 		xfree(temp_str);
4650 	} else
4651 		conf->select_type_param = 0;
4652 
4653 	if (!s_p_get_string( &conf->slurm_user_name, "SlurmUser", hashtbl)) {
4654 		conf->slurm_user_name = xstrdup("root");
4655 		conf->slurm_user_id   = 0;
4656 	} else {
4657 		uid_t my_uid;
4658 		if (uid_from_string (conf->slurm_user_name, &my_uid) < 0) {
4659 			error ("Invalid user for SlurmUser %s, ignored",
4660 			       conf->slurm_user_name);
4661 			xfree(conf->slurm_user_name);
4662 			return SLURM_ERROR;
4663 		} else {
4664 			conf->slurm_user_id = my_uid;
4665 		}
4666 	}
4667 #ifdef HAVE_NATIVE_CRAY
4668 	/*
4669 	 * When running on Native Cray the SlurmUser must be root
4670 	 * to access the needed libraries.
4671 	 */
4672 	if (conf->slurm_user_id != 0) {
4673 		error("Cray/Aries requires SlurmUser=root (default), but have '%s'.",
4674 			conf->slurm_user_name);
4675 		return SLURM_ERROR;
4676 	}
4677 #endif
4678 
4679 	if (!s_p_get_string( &conf->slurmd_user_name, "SlurmdUser", hashtbl)) {
4680 		conf->slurmd_user_name = xstrdup("root");
4681 		conf->slurmd_user_id   = 0;
4682 	} else {
4683 		uid_t my_uid;
4684 		if (uid_from_string (conf->slurmd_user_name, &my_uid) < 0) {
4685 			error("Invalid user for SlurmdUser %s, ignored",
4686 			       conf->slurmd_user_name);
4687 			xfree(conf->slurmd_user_name);
4688 			return SLURM_ERROR;
4689 		} else {
4690 			conf->slurmd_user_id = my_uid;
4691 		}
4692 	}
4693 
4694 	(void) s_p_get_string(&conf->slurmctld_addr, "SlurmctldAddr",
4695 			      hashtbl);
4696 
4697 	if (s_p_get_string(&temp_str, "SlurmctldDebug", hashtbl)) {
4698 		conf->slurmctld_debug = log_string2num(temp_str);
4699 		if (conf->slurmctld_debug == NO_VAL16) {
4700 			error("Invalid SlurmctldDebug %s", temp_str);
4701 			return SLURM_ERROR;
4702 		}
4703 		xfree(temp_str);
4704 		_normalize_debug_level(&conf->slurmctld_debug);
4705 	} else
4706 		conf->slurmctld_debug = LOG_LEVEL_INFO;
4707 
4708 	if (!s_p_get_string(&conf->slurmctld_pidfile,
4709 			    "SlurmctldPidFile", hashtbl))
4710 		conf->slurmctld_pidfile = xstrdup(DEFAULT_SLURMCTLD_PIDFILE);
4711 
4712 	(void) s_p_get_string(&conf->slurmctld_plugstack, "SlurmctldPlugstack",
4713 			      hashtbl);
4714 
4715 	(void )s_p_get_string(&conf->slurmctld_logfile, "SlurmctldLogFile",
4716 			      hashtbl);
4717 
4718 	if (s_p_get_string(&temp_str, "SlurmctldSyslogDebug", hashtbl)) {
4719 		conf->slurmctld_syslog_debug = log_string2num(temp_str);
4720 		if (conf->slurmctld_syslog_debug == NO_VAL16) {
4721 			error("Invalid SlurmctldSyslogDebug %s", temp_str);
4722 			return SLURM_ERROR;
4723 		}
4724 		xfree(temp_str);
4725 		_normalize_debug_level(&conf->slurmctld_syslog_debug);
4726 	} else
4727 		conf->slurmctld_syslog_debug = LOG_LEVEL_END;
4728 
4729 	if (s_p_get_string(&temp_str, "SlurmctldPort", hashtbl)) {
4730 		char *end_ptr = NULL;
4731 		long port_long;
4732 		slurm_seterrno(0);
4733 		port_long = strtol(temp_str, &end_ptr, 10);
4734 		if ((port_long == LONG_MIN) || (port_long == LONG_MAX) ||
4735 		    (port_long <= 0) || errno) {
4736 			error("Invalid SlurmctldPort %s", temp_str);
4737 			return SLURM_ERROR;
4738 		}
4739 		conf->slurmctld_port = port_long;
4740 		if (end_ptr[0] == '-') {
4741 			port_long = strtol(end_ptr+1, NULL, 10);
4742 			if ((port_long == LONG_MIN) ||
4743 			    (port_long == LONG_MAX) ||
4744 			    (port_long <= conf->slurmctld_port) || errno) {
4745 				error("Invalid SlurmctldPort %s", temp_str);
4746 				return SLURM_ERROR;
4747 			}
4748 			conf->slurmctld_port_count = port_long + 1 -
4749 						     conf->slurmctld_port;
4750 		} else if (end_ptr[0] != '\0') {
4751 			error("Invalid SlurmctldPort %s", temp_str);
4752 			return SLURM_ERROR;
4753 		} else {
4754 			conf->slurmctld_port_count = 1;
4755 		}
4756 		xfree(temp_str);
4757 	} else {
4758 		conf->slurmctld_port = SLURMCTLD_PORT;
4759 		conf->slurmctld_port_count = SLURMCTLD_PORT_COUNT;
4760 	}
4761 
4762 	(void) s_p_get_string(&conf->slurmctld_primary_off_prog,
4763 			      "SlurmctldPrimaryOffProg", hashtbl);
4764 	(void) s_p_get_string(&conf->slurmctld_primary_on_prog,
4765 			      "SlurmctldPrimaryOnProg", hashtbl);
4766 
4767 	if (!s_p_get_uint16(&conf->slurmctld_timeout,
4768 			    "SlurmctldTimeout", hashtbl))
4769 		conf->slurmctld_timeout = DEFAULT_SLURMCTLD_TIMEOUT;
4770 
4771 	(void) s_p_get_string(&conf->slurmctld_params,
4772 			      "SlurmctldParameters", hashtbl);
4773 
4774 	if (s_p_get_string(&temp_str, "SlurmdDebug", hashtbl)) {
4775 		conf->slurmd_debug = log_string2num(temp_str);
4776 		if (conf->slurmd_debug == NO_VAL16) {
4777 			error("Invalid SlurmdDebug %s", temp_str);
4778 			return SLURM_ERROR;
4779 		}
4780 		xfree(temp_str);
4781 		_normalize_debug_level(&conf->slurmd_debug);
4782 	} else
4783 		conf->slurmd_debug = LOG_LEVEL_INFO;
4784 
4785 	(void) s_p_get_string(&conf->slurmd_logfile, "SlurmdLogFile", hashtbl);
4786 
4787 	(void) s_p_get_string(&conf->slurmd_params, "SlurmdParameters", hashtbl);
4788 	if (xstrcasestr(conf->slurmd_params, "config_overrides"))
4789 		conf->conf_flags |= CTL_CONF_OR;
4790 
4791 	if (!s_p_get_string(&conf->slurmd_pidfile, "SlurmdPidFile", hashtbl))
4792 		conf->slurmd_pidfile = xstrdup(DEFAULT_SLURMD_PIDFILE);
4793 
4794 	if (!s_p_get_uint32(&conf->slurmd_port, "SlurmdPort", hashtbl))
4795 		conf->slurmd_port = SLURMD_PORT;
4796 
4797 	(void) s_p_get_string(&conf->sched_logfile, "SlurmSchedLogFile",
4798 			      hashtbl);
4799 
4800 	if (!s_p_get_uint16(&conf->sched_log_level,
4801 			   "SlurmSchedLogLevel", hashtbl))
4802 		conf->sched_log_level = DEFAULT_SCHED_LOG_LEVEL;
4803 	if (conf->sched_log_level && !conf->sched_logfile) {
4804 		error("SlurmSchedLogLevel requires SlurmSchedLogFile value");
4805 		return SLURM_ERROR;
4806 	}
4807 
4808 	if (!s_p_get_string(&conf->slurmd_spooldir, "SlurmdSpoolDir", hashtbl))
4809 		conf->slurmd_spooldir = xstrdup(DEFAULT_SPOOLDIR);
4810 
4811 	if (s_p_get_string(&temp_str, "SlurmdSyslogDebug", hashtbl)) {
4812 		conf->slurmd_syslog_debug = log_string2num(temp_str);
4813 		if (conf->slurmd_syslog_debug == NO_VAL16) {
4814 			error("Invalid SlurmdSyslogDebug %s", temp_str);
4815 			return SLURM_ERROR;
4816 		}
4817 		xfree(temp_str);
4818 		_normalize_debug_level(&conf->slurmd_syslog_debug);
4819 	} else
4820 		conf->slurmd_syslog_debug = LOG_LEVEL_END;
4821 
4822 	if (!s_p_get_uint16(&conf->slurmd_timeout, "SlurmdTimeout", hashtbl))
4823 		conf->slurmd_timeout = DEFAULT_SLURMD_TIMEOUT;
4824 
4825 	(void) s_p_get_string(&conf->srun_prolog, "SrunProlog", hashtbl);
4826 	if (s_p_get_string(&temp_str, "SrunPortRange", hashtbl)) {
4827 		conf->srun_port_range = _parse_srun_ports(temp_str);
4828 		xfree(temp_str);
4829 	}
4830 	(void) s_p_get_string(&conf->srun_epilog, "SrunEpilog", hashtbl);
4831 
4832 	if (!s_p_get_string(&conf->state_save_location,
4833 			    "StateSaveLocation", hashtbl))
4834 		conf->state_save_location = xstrdup(DEFAULT_SAVE_STATE_LOC);
4835 
4836 	(void) s_p_get_string(&conf->suspend_exc_nodes, "SuspendExcNodes",
4837 			      hashtbl);
4838 	(void) s_p_get_string(&conf->suspend_exc_parts, "SuspendExcParts",
4839 			      hashtbl);
4840 	(void) s_p_get_string(&conf->suspend_program, "SuspendProgram",
4841 			      hashtbl);
4842 	if (!s_p_get_uint16(&conf->suspend_rate, "SuspendRate", hashtbl))
4843 		conf->suspend_rate = DEFAULT_SUSPEND_RATE;
4844 	if (s_p_get_string(&temp_str, "SuspendTime", hashtbl)) {
4845 		if (!xstrcasecmp(temp_str, "NONE"))
4846 			long_suspend_time = -1;
4847 		else
4848 			long_suspend_time = atoi(temp_str);
4849 		xfree(temp_str);
4850 		if (long_suspend_time < -1) {
4851 			error("SuspendTime value (%ld) is less than -1",
4852 			      long_suspend_time);
4853 		} else
4854 			conf->suspend_time = long_suspend_time + 1;
4855 	} else {
4856 		conf->suspend_time = 0;
4857 	}
4858 	if (!s_p_get_uint16(&conf->suspend_timeout, "SuspendTimeout", hashtbl))
4859 		conf->suspend_timeout = DEFAULT_SUSPEND_TIMEOUT;
4860 
4861 	/* see above for switch_type, order dependent */
4862 
4863 	if (!s_p_get_string(&conf->task_plugin, "TaskPlugin", hashtbl))
4864 		conf->task_plugin = xstrdup(DEFAULT_TASK_PLUGIN);
4865 #ifdef HAVE_FRONT_END
4866 	if (xstrcmp(conf->task_plugin, "task/none")) {
4867 		error("On FrontEnd systems TaskPlugin=task/none is required");
4868 		return SLURM_ERROR;
4869 	}
4870 #endif
4871 
4872 	conf->task_plugin_param = 0;
4873 	if (s_p_get_string(&temp_str, "TaskPluginParam", hashtbl)) {
4874 		char *last = NULL, *tok;
4875 		bool set_mode = false, set_unit = false, set_auto = false;
4876 		tok = strtok_r(temp_str, ",", &last);
4877 		while (tok) {
4878 			if (xstrcasecmp(tok, "none") == 0) {
4879 				if (set_unit) {
4880 					error("Bad TaskPluginParam: %s", tok);
4881 					return SLURM_ERROR;
4882 				}
4883 				set_unit = true;
4884 				conf->task_plugin_param |= CPU_BIND_NONE;
4885 			} else if (xstrcasecmp(tok, "boards") == 0) {
4886 				if (set_unit) {
4887 					error("Bad TaskPluginParam: %s", tok);
4888 					return SLURM_ERROR;
4889 				}
4890 				set_unit = true;
4891 				conf->task_plugin_param |= CPU_BIND_TO_BOARDS;
4892 			} else if (xstrcasecmp(tok, "sockets") == 0) {
4893 				if (set_unit) {
4894 					error("Bad TaskPluginParam: %s", tok);
4895 					return SLURM_ERROR;
4896 				}
4897 				set_unit = true;
4898 				conf->task_plugin_param |= CPU_BIND_TO_SOCKETS;
4899 			} else if (xstrcasecmp(tok, "cores") == 0) {
4900 				if (set_unit) {
4901 					error("Bad TaskPluginParam: %s", tok);
4902 					return SLURM_ERROR;
4903 				}
4904 				set_unit = true;
4905 				conf->task_plugin_param |= CPU_BIND_TO_CORES;
4906 			} else if (xstrcasecmp(tok, "threads") == 0) {
4907 				if (set_unit) {
4908 					error("Bad TaskPluginParam: %s", tok);
4909 					return SLURM_ERROR;
4910 				}
4911 				set_unit = true;
4912 				conf->task_plugin_param |= CPU_BIND_TO_THREADS;
4913 			} else if (xstrcasecmp(tok, "cpusets") == 0) {
4914 				if (set_mode) {
4915 					error("Bad TaskPluginParam: %s", tok);
4916 					return SLURM_ERROR;
4917 				}
4918 				set_mode = true;
4919 				conf->task_plugin_param |= CPU_BIND_CPUSETS;
4920 			} else if (xstrcasecmp(tok, "sched") == 0) {
4921 				if (set_mode) {
4922 					error("Bad TaskPluginParam: %s", tok);
4923 					return SLURM_ERROR;
4924 				}
4925 				set_mode = true;
4926 				/* No change to task_plugin_param,
4927 				 * this is the default */
4928 			} else if (xstrcasecmp(tok, "verbose") == 0) {
4929 				conf->task_plugin_param |= CPU_BIND_VERBOSE;
4930 			} else if (xstrncasecmp(tok, "autobind=",
4931 						strlen("autobind=")) == 0) {
4932 				char *val_ptr = tok + strlen("autobind=");
4933 
4934 				if (set_auto) {
4935 					error("Bad TaskPluginParam: "
4936 							"autobind already set");
4937 					return SLURM_ERROR;
4938 				}
4939 
4940 				if (xstrcasecmp(val_ptr, "none") == 0) {
4941 					set_auto = true;
4942 				} else if (xstrcasecmp(val_ptr,
4943 						       "threads") == 0) {
4944 					set_auto = true;
4945 					conf->task_plugin_param |=
4946 						CPU_AUTO_BIND_TO_THREADS;
4947 				} else if (xstrcasecmp(val_ptr,
4948 						       "cores") == 0) {
4949 					set_auto = true;
4950 					conf->task_plugin_param |=
4951 						CPU_AUTO_BIND_TO_CORES;
4952 				} else if (xstrcasecmp(val_ptr,
4953 						       "sockets") == 0) {
4954 					set_auto = true;
4955 					conf->task_plugin_param |=
4956 						CPU_AUTO_BIND_TO_SOCKETS;
4957 				} else {
4958 					error("Bad TaskPluginParam autobind "
4959 							"value: %s",val_ptr);
4960 					return SLURM_ERROR;
4961 				}
4962 			} else if (xstrcasecmp(tok, "SlurmdOffSpec") == 0) {
4963 				if (xstrcasestr(conf->task_plugin,
4964 						"cray_aries")) {
4965 					error("TaskPluginParam=SlurmdOffSpec invalid with TaskPlugin=task/cray_aries");
4966 					return SLURM_ERROR;
4967 				}
4968 				conf->task_plugin_param |= SLURMD_OFF_SPEC;
4969 			} else {
4970 				error("Bad TaskPluginParam: %s", tok);
4971 				return SLURM_ERROR;
4972 			}
4973 			tok = strtok_r(NULL, ",", &last);
4974 		}
4975 		xfree(temp_str);
4976 	}
4977 
4978 	(void) s_p_get_string(&conf->task_epilog, "TaskEpilog", hashtbl);
4979 	(void) s_p_get_string(&conf->task_prolog, "TaskProlog", hashtbl);
4980 
4981 	if (!s_p_get_uint16(&conf->tcp_timeout, "TCPTimeout", hashtbl))
4982 		conf->tcp_timeout = DEFAULT_TCP_TIMEOUT;
4983 
4984 	if (!s_p_get_string(&conf->tmp_fs, "TmpFS", hashtbl))
4985 		conf->tmp_fs = xstrdup(DEFAULT_TMP_FS);
4986 
4987 	if (!s_p_get_uint16(&conf->wait_time, "WaitTime", hashtbl))
4988 		conf->wait_time = DEFAULT_WAIT_TIME;
4989 
4990 	(void) s_p_get_string(&conf->x11_params, "X11Parameters", hashtbl);
4991 
4992 	(void) s_p_get_string(&conf->topology_param, "TopologyParam", hashtbl);
4993 	if (conf->topology_param) {
4994 		/* Move legacy settings over to new spot */
4995 		char *legacy_var = "NoInAddrAny";
4996 		if (xstrcasestr(conf->topology_param, legacy_var) &&
4997 		    !xstrcasestr(conf->comm_params, legacy_var))
4998 			xstrfmtcat(conf->comm_params, "%s%s",
4999 				   conf->comm_params ? "," : "", legacy_var);
5000 
5001 		legacy_var = "NoCtldInAddrAny";
5002 		if (xstrcasestr(conf->topology_param, legacy_var) &&
5003 		    !xstrcasestr(conf->comm_params, legacy_var))
5004 			xstrfmtcat(conf->comm_params, "%s%s",
5005 				   conf->comm_params ? "," : "", legacy_var);
5006 	}
5007 
5008 	if (!s_p_get_string(&conf->topology_plugin, "TopologyPlugin", hashtbl))
5009 		conf->topology_plugin = xstrdup(DEFAULT_TOPOLOGY_PLUGIN);
5010 
5011 	if (s_p_get_uint16(&conf->tree_width, "TreeWidth", hashtbl)) {
5012 		if (conf->tree_width == 0) {
5013 			error("TreeWidth=0 is invalid");
5014 			conf->tree_width = DEFAULT_TREE_WIDTH;
5015 		}
5016 	} else {
5017 		conf->tree_width = DEFAULT_TREE_WIDTH;
5018 	}
5019 
5020 	if (s_p_get_boolean(&truth, "UsePAM", hashtbl) && truth)
5021 		conf->conf_flags |= CTL_CONF_PAM;
5022 
5023 	s_p_get_string(&conf->unkillable_program,
5024 		       "UnkillableStepProgram", hashtbl);
5025 	if (!s_p_get_uint16(&conf->unkillable_timeout,
5026 			    "UnkillableStepTimeout", hashtbl))
5027 		conf->unkillable_timeout = DEFAULT_UNKILLABLE_TIMEOUT;
5028 
5029 	(void) s_p_get_uint16(&conf->vsize_factor, "VSizeFactor", hashtbl);
5030 
5031 	/* The default values for both of these variables are NULL.
5032 	 */
5033 	(void) s_p_get_string(&conf->requeue_exit, "RequeueExit", hashtbl);
5034 	(void) s_p_get_string(&conf->requeue_exit_hold, "RequeueExitHold",
5035 			      hashtbl);
5036 
5037 	if (!s_p_get_string(&conf->layouts, "Layouts", hashtbl))
5038 		conf->layouts = xstrdup("");
5039 
5040 	/* srun eio network timeout with the slurmstepd
5041 	 */
5042 	if (!s_p_get_uint16(&conf->eio_timeout, "EioTimeout", hashtbl))
5043 		conf->eio_timeout = DEFAULT_EIO_SHUTDOWN_WAIT;
5044 
5045 	if (!s_p_get_uint16(&conf->prolog_epilog_timeout,
5046 			    "PrologEpilogTimeout",
5047 			    hashtbl)) {
5048 		/* The default value is wait forever
5049 		 */
5050 		conf->prolog_epilog_timeout = NO_VAL16;
5051 	}
5052 
5053 	xfree(default_storage_type);
5054 	xfree(default_storage_loc);
5055 	xfree(default_storage_host);
5056 	xfree(default_storage_user);
5057 	xfree(default_storage_pass);
5058 
5059 	return SLURM_SUCCESS;
5060 }
5061 
5062 /*
5063  * Replace first "%h" in path string with NodeHostname.
5064  * Replace first "%n" in path string with NodeName.
5065  *
5066  * NOTE: Caller should be holding slurm_conf_lock() when calling this function.
5067  *
5068  * Returns an xmalloc()ed string which the caller must free with xfree().
5069  */
extern char *
slurm_conf_expand_slurmd_path(const char *path, const char *node_name)
{
	char *expanded = xstrdup(path);
	char *host = _internal_get_hostname(node_name);

	/* Only the first occurrence of each pattern is replaced. */
	xstrsubstitute(expanded, "%h", host);
	xstrsubstitute(expanded, "%n", node_name);
	xfree(host);

	return expanded;
}
5084 
5085 /*
5086  * prolog_flags2str - convert a PrologFlags uint16_t to the equivalent string
5087  * Keep in sync with prolog_str2flags() below
5088  */
prolog_flags2str(uint16_t prolog_flags)5089 extern char * prolog_flags2str(uint16_t prolog_flags)
5090 {
5091 	char *rc = NULL;
5092 
5093 	if (prolog_flags & PROLOG_FLAG_ALLOC) {
5094 		if (rc)
5095 			xstrcat(rc, ",");
5096 		xstrcat(rc, "Alloc");
5097 	}
5098 
5099 	if (prolog_flags & PROLOG_FLAG_CONTAIN) {
5100 		if (rc)
5101 			xstrcat(rc, ",");
5102 		xstrcat(rc, "Contain");
5103 	}
5104 
5105 	if (prolog_flags & PROLOG_FLAG_NOHOLD) {
5106 		if (rc)
5107 			xstrcat(rc, ",");
5108 		xstrcat(rc, "NoHold");
5109 	}
5110 
5111 	if (prolog_flags & PROLOG_FLAG_SERIAL) {
5112 		if (rc)
5113 			xstrcat(rc, ",");
5114 		xstrcat(rc, "Serial");
5115 	}
5116 
5117 	if (prolog_flags & PROLOG_FLAG_X11) {
5118 		if (rc)
5119 			xstrcat(rc, ",");
5120 		xstrcat(rc, "X11");
5121 	}
5122 
5123 	return rc;
5124 }
5125 
5126 /*
5127  * prolog_str2flags - Convert a PrologFlags string to the equivalent uint16_t
5128  * Keep in sync with prolog_flags2str() above
 * Returns NO_VAL16 if invalid
5130  */
prolog_str2flags(char * prolog_flags)5131 extern uint16_t prolog_str2flags(char *prolog_flags)
5132 {
5133 	uint16_t rc = 0;
5134 	char *tmp_str, *tok, *last = NULL;
5135 
5136 	if (!prolog_flags)
5137 		return rc;
5138 
5139 	tmp_str = xstrdup(prolog_flags);
5140 	tok = strtok_r(tmp_str, ",", &last);
5141 	while (tok) {
5142 		if (xstrcasecmp(tok, "Alloc") == 0)
5143 			rc |= PROLOG_FLAG_ALLOC;
5144 		else if (xstrcasecmp(tok, "Contain") == 0)
5145 			rc |= (PROLOG_FLAG_ALLOC | PROLOG_FLAG_CONTAIN);
5146 		else if (xstrcasecmp(tok, "NoHold") == 0)
5147 			rc |= PROLOG_FLAG_NOHOLD;
5148 		else if (xstrcasecmp(tok, "Serial") == 0)
5149 			rc |= PROLOG_FLAG_SERIAL;
5150 		else if (xstrcasecmp(tok, "X11") == 0) {
5151 #ifdef WITH_SLURM_X11
5152 			rc |= (PROLOG_FLAG_ALLOC | PROLOG_FLAG_CONTAIN
5153 			       | PROLOG_FLAG_X11);
5154 #else
5155 			error("X11 forwarding not built in, cannot enable.");
5156 			rc = NO_VAL16;
5157 			break;
5158 #endif
5159 		} else {
5160 			error("Invalid PrologFlag: %s", tok);
5161 			rc = NO_VAL16;
5162 			break;
5163 		}
5164 		tok = strtok_r(NULL, ",", &last);
5165 	}
5166 	xfree(tmp_str);
5167 
5168 	return rc;
5169 }
5170 
5171 /*
5172  * debug_flags2str - convert a DebugFlags uint64_t to the equivalent string
5173  * Keep in sync with debug_str2flags() below
5174  */
debug_flags2str(uint64_t debug_flags)5175 extern char * debug_flags2str(uint64_t debug_flags)
5176 {
5177 	char *rc = NULL;
5178 
5179 	/* When adding to this please attempt to keep flags in
5180 	 * alphabetical order.
5181 	 */
5182 
5183 	if (debug_flags & DEBUG_FLAG_ACCRUE) {
5184 		if (rc)
5185 			xstrcat(rc, ",");
5186 		xstrcat(rc, "Accrue");
5187 	}
5188 	if (debug_flags & DEBUG_FLAG_AGENT) {
5189 		if (rc)
5190 			xstrcat(rc, ",");
5191 		xstrcat(rc, "Agent");
5192 	}
5193 	if (debug_flags & DEBUG_FLAG_BACKFILL) {
5194 		if (rc)
5195 			xstrcat(rc, ",");
5196 		xstrcat(rc, "Backfill");
5197 	}
5198 	if (debug_flags & DEBUG_FLAG_BACKFILL_MAP) {
5199 		if (rc)
5200 			xstrcat(rc, ",");
5201 		xstrcat(rc, "BackfillMap");
5202 	}
5203 	if (debug_flags & DEBUG_FLAG_BURST_BUF) {
5204 		if (rc)
5205 			xstrcat(rc, ",");
5206 		xstrcat(rc, "BurstBuffer");
5207 	}
5208 	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
5209 		if (rc)
5210 			xstrcat(rc, ",");
5211 		xstrcat(rc, "CpuFrequency");
5212 	}
5213 	if (debug_flags & DEBUG_FLAG_CPU_BIND) {
5214 		if (rc)
5215 			xstrcat(rc, ",");
5216 		xstrcat(rc, "CPU_Bind");
5217 	}
5218 	if (debug_flags & DEBUG_FLAG_DATA) {
5219 		if (rc)
5220 			xstrcat(rc, ",");
5221 		xstrcat(rc, "Data");
5222 	}
5223 	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE) {
5224 		if (rc)
5225 			xstrcat(rc, ",");
5226 		xstrcat(rc, "DB_Archive");
5227 	}
5228 	if (debug_flags & DEBUG_FLAG_DB_ASSOC) {
5229 		if (rc)
5230 			xstrcat(rc, ",");
5231 		xstrcat(rc, "DB_Assoc");
5232 	}
5233 	if (debug_flags & DEBUG_FLAG_DB_TRES) {
5234 		if (rc)
5235 			xstrcat(rc, ",");
5236 		xstrcat(rc, "DB_TRES");
5237 	}
5238 	if (debug_flags & DEBUG_FLAG_DB_EVENT) {
5239 		if (rc)
5240 			xstrcat(rc, ",");
5241 		xstrcat(rc, "DB_Event");
5242 	}
5243 	if (debug_flags & DEBUG_FLAG_DB_JOB) {
5244 		if (rc)
5245 			xstrcat(rc, ",");
5246 		xstrcat(rc, "DB_Job");
5247 	}
5248 	if (debug_flags & DEBUG_FLAG_DB_QOS) {
5249 		if (rc)
5250 			xstrcat(rc, ",");
5251 		xstrcat(rc, "DB_QOS");
5252 	}
5253 	if (debug_flags & DEBUG_FLAG_DB_QUERY) {
5254 		if (rc)
5255 			xstrcat(rc, ",");
5256 		xstrcat(rc, "DB_Query");
5257 	}
5258 	if (debug_flags & DEBUG_FLAG_DB_RESV) {
5259 		if (rc)
5260 			xstrcat(rc, ",");
5261 		xstrcat(rc, "DB_Reservation");
5262 	}
5263 	if (debug_flags & DEBUG_FLAG_DB_RES) {
5264 		if (rc)
5265 			xstrcat(rc, ",");
5266 		xstrcat(rc, "DB_Resource");
5267 	}
5268 	if (debug_flags & DEBUG_FLAG_DB_STEP) {
5269 		if (rc)
5270 			xstrcat(rc, ",");
5271 		xstrcat(rc, "DB_Step");
5272 	}
5273 	if (debug_flags & DEBUG_FLAG_DB_USAGE) {
5274 		if (rc)
5275 			xstrcat(rc, ",");
5276 		xstrcat(rc, "DB_Usage");
5277 	}
5278 	if (debug_flags & DEBUG_FLAG_DB_WCKEY) {
5279 		if (rc)
5280 			xstrcat(rc, ",");
5281 		xstrcat(rc, "DB_WCKey");
5282 	}
5283 	if (debug_flags & DEBUG_FLAG_DEPENDENCY) {
5284 		if (rc)
5285 			xstrcat(rc, ",");
5286 		xstrcat(rc, "Dependency");
5287 	}
5288 	if (debug_flags & DEBUG_FLAG_ESEARCH) {
5289 		if (rc)
5290 			xstrcat(rc, ",");
5291 		xstrcat(rc, "Elasticsearch");
5292 	}
5293 	if (debug_flags & DEBUG_FLAG_ENERGY) {
5294 		if (rc)
5295 			xstrcat(rc, ",");
5296 		xstrcat(rc, "Energy");
5297 	}
5298 	if (debug_flags & DEBUG_FLAG_EXT_SENSORS) {
5299 		if (rc)
5300 			xstrcat(rc, ",");
5301 		xstrcat(rc, "ExtSensors");
5302 	}
5303 	if (debug_flags & DEBUG_FLAG_FILESYSTEM) {
5304 		if (rc)
5305 			xstrcat(rc, ",");
5306 		xstrcat(rc, "Filesystem");
5307 	}
5308 	if (debug_flags & DEBUG_FLAG_FEDR) {
5309 		if (rc)
5310 			xstrcat(rc, ",");
5311 		xstrcat(rc, "Federation");
5312 	}
5313 	if (debug_flags & DEBUG_FLAG_FRONT_END) {
5314 		if (rc)
5315 			xstrcat(rc, ",");
5316 		xstrcat(rc, "FrontEnd");
5317 	}
5318 	if (debug_flags & DEBUG_FLAG_GANG) {
5319 		if (rc)
5320 			xstrcat(rc, ",");
5321 		xstrcat(rc, "Gang");
5322 	}
5323 	if (debug_flags & DEBUG_FLAG_GRES) {
5324 		if (rc)
5325 			xstrcat(rc, ",");
5326 		xstrcat(rc, "Gres");
5327 	}
5328 	if (debug_flags & DEBUG_FLAG_HETJOB) {
5329 		if (rc)
5330 			xstrcat(rc, ",");
5331 		xstrcat(rc, "Hetjob");
5332 	}
5333 	if (debug_flags & DEBUG_FLAG_INTERCONNECT) {
5334 		if (rc)
5335 			xstrcat(rc, ",");
5336 		xstrcat(rc, "Interconnect");
5337 	}
5338 	if (debug_flags & DEBUG_FLAG_JOB_CONT) {
5339 		if (rc)
5340 			xstrcat(rc, ",");
5341 		xstrcat(rc, "JobContainer");
5342 	}
5343 	if (debug_flags & DEBUG_FLAG_NODE_FEATURES) {
5344 		if (rc)
5345 			xstrcat(rc, ",");
5346 		xstrcat(rc, "NodeFeatures");
5347 	}
5348 	if (debug_flags & DEBUG_FLAG_LICENSE) {
5349 		if (rc)
5350 			xstrcat(rc, ",");
5351 		xstrcat(rc, "License");
5352 	}
5353 	if (debug_flags & DEBUG_FLAG_NET) {
5354 		if (rc)
5355 			xstrcat(rc, ",");
5356 		xstrcat(rc, "Network");
5357 	}
5358 	if (debug_flags & DEBUG_FLAG_NO_CONF_HASH) {
5359 		if (rc)
5360 			xstrcat(rc, ",");
5361 		xstrcat(rc, "NO_CONF_HASH");
5362 	}
5363 	if (debug_flags & DEBUG_FLAG_NO_REALTIME) {
5364 		if (rc)
5365 			xstrcat(rc, ",");
5366 		xstrcat(rc, "NoRealTime");
5367 	}
5368 	if (debug_flags & DEBUG_FLAG_POWER) {
5369 		if (rc)
5370 			xstrcat(rc, ",");
5371 		xstrcat(rc, "Power");
5372 	}
5373 	if (debug_flags & DEBUG_FLAG_POWER_SAVE) {
5374 		if (rc)
5375 			xstrcat(rc, ",");
5376 		xstrcat(rc, "PowerSave");
5377 	}
5378 	if (debug_flags & DEBUG_FLAG_PRIO) {
5379 		if (rc)
5380 			xstrcat(rc, ",");
5381 		xstrcat(rc, "Priority");
5382 	}
5383 	if (debug_flags & DEBUG_FLAG_PROFILE) {
5384 		if (rc)
5385 			xstrcat(rc, ",");
5386 		xstrcat(rc, "Profile");
5387 	}
5388 	if (debug_flags & DEBUG_FLAG_PROTOCOL) {
5389 		if (rc)
5390 			xstrcat(rc, ",");
5391 		xstrcat(rc, "Protocol");
5392 	}
5393 	if (debug_flags & DEBUG_FLAG_RESERVATION) {
5394 		if (rc)
5395 			xstrcat(rc, ",");
5396 		xstrcat(rc, "Reservation");
5397 	}
5398 	if (debug_flags & DEBUG_FLAG_ROUTE) {
5399 		if (rc)
5400 			xstrcat(rc, ",");
5401 		xstrcat(rc, "Route");
5402 	}
5403 	if (debug_flags & DEBUG_FLAG_SELECT_TYPE) {
5404 		if (rc)
5405 			xstrcat(rc, ",");
5406 		xstrcat(rc, "SelectType");
5407 	}
5408 	if (debug_flags & DEBUG_FLAG_STEPS) {
5409 		if (rc)
5410 			xstrcat(rc, ",");
5411 		xstrcat(rc, "Steps");
5412 	}
5413 	if (debug_flags & DEBUG_FLAG_SWITCH) {
5414 		if (rc)
5415 			xstrcat(rc, ",");
5416 		xstrcat(rc, "Switch");
5417 	}
5418 	if (debug_flags & DEBUG_FLAG_TASK) {
5419 		if (rc)
5420 			xstrcat(rc, ",");
5421 		xstrcat(rc, "Task");
5422 	}
5423 	if (debug_flags & DEBUG_FLAG_TIME_CRAY) {
5424 		if (rc)
5425 			xstrcat(rc, ",");
5426 		xstrcat(rc, "TimeCray");
5427 	}
5428 	if (debug_flags & DEBUG_FLAG_TRES_NODE) {
5429 		if (rc)
5430 			xstrcat(rc, ",");
5431 		xstrcat(rc, "TRESNode");
5432 	}
5433 	if (debug_flags & DEBUG_FLAG_TRACE_JOBS) {
5434 		if (rc)
5435 			xstrcat(rc, ",");
5436 		xstrcat(rc, "TraceJobs");
5437 	}
5438 	if (debug_flags & DEBUG_FLAG_TRIGGERS) {
5439 		if (rc)
5440 			xstrcat(rc, ",");
5441 		xstrcat(rc, "Triggers");
5442 	}
5443 	if (debug_flags & DEBUG_FLAG_WORKQ) {
5444 		if (rc)
5445 			xstrcat(rc, ",");
5446 		xstrcat(rc, "WorkQueue");
5447 	}
5448 
5449 	return rc;
5450 }
5451 
5452 /*
5453  * debug_str2flags - Convert a DebugFlags string to the equivalent uint64_t
 * Keep in sync with debug_flags2str() above
5455  * Returns SLURM_ERROR if invalid
5456  */
debug_str2flags(char * debug_flags,uint64_t * flags_out)5457 extern int debug_str2flags(char *debug_flags, uint64_t *flags_out)
5458 {
5459 	int rc = SLURM_SUCCESS;
5460 	char *tmp_str, *tok, *last = NULL;
5461 
5462 	xassert(flags_out);
5463 
5464 	(*flags_out) = 0;
5465 
5466 	if (!debug_flags)
5467 		return rc;
5468 
5469 	tmp_str = xstrdup(debug_flags);
5470 	tok = strtok_r(tmp_str, ",", &last);
5471 	while (tok) {
5472 		if (xstrcasecmp(tok, "Accrue") == 0)
5473 			(*flags_out) |= DEBUG_FLAG_ACCRUE;
5474 		else if (xstrcasecmp(tok, "Agent") == 0)
5475 			(*flags_out) |= DEBUG_FLAG_AGENT;
5476 		else if (xstrcasecmp(tok, "Backfill") == 0)
5477 			(*flags_out) |= DEBUG_FLAG_BACKFILL;
5478 		else if (xstrcasecmp(tok, "BackfillMap") == 0)
5479 			(*flags_out) |= DEBUG_FLAG_BACKFILL_MAP;
5480 		else if (xstrcasecmp(tok, "BurstBuffer") == 0)
5481 			(*flags_out) |= DEBUG_FLAG_BURST_BUF;
5482 		else if (xstrcasecmp(tok, "CPU_Bind") == 0)
5483 			(*flags_out) |= DEBUG_FLAG_CPU_BIND;
5484 		else if (xstrcasecmp(tok, "Data") == 0)
5485 			(*flags_out) |= DEBUG_FLAG_DATA;
5486 		else if (xstrcasecmp(tok, "DB_Archive") == 0)
5487 			(*flags_out) |= DEBUG_FLAG_DB_ARCHIVE;
5488 		else if (xstrcasecmp(tok, "DB_Assoc") == 0)
5489 			(*flags_out) |= DEBUG_FLAG_DB_ASSOC;
5490 		else if (xstrcasecmp(tok, "DB_TRES") == 0)
5491 			(*flags_out) |= DEBUG_FLAG_DB_TRES;
5492 		else if (xstrcasecmp(tok, "DB_Event") == 0)
5493 			(*flags_out) |= DEBUG_FLAG_DB_EVENT;
5494 		else if (xstrcasecmp(tok, "DB_Job") == 0)
5495 			(*flags_out) |= DEBUG_FLAG_DB_JOB;
5496 		else if (xstrcasecmp(tok, "DB_QOS") == 0)
5497 			(*flags_out) |= DEBUG_FLAG_DB_QOS;
5498 		else if (xstrcasecmp(tok, "DB_Query") == 0)
5499 			(*flags_out) |= DEBUG_FLAG_DB_QUERY;
5500 		else if (xstrcasecmp(tok, "DB_Reservation") == 0)
5501 			(*flags_out) |= DEBUG_FLAG_DB_RESV;
5502 		else if (xstrcasecmp(tok, "DB_Resource") == 0)
5503 			(*flags_out) |= DEBUG_FLAG_DB_RES;
5504 		else if (xstrcasecmp(tok, "DB_Step") == 0)
5505 			(*flags_out) |= DEBUG_FLAG_DB_STEP;
5506 		else if (xstrcasecmp(tok, "DB_Usage") == 0)
5507 			(*flags_out) |= DEBUG_FLAG_DB_USAGE;
5508 		else if (xstrcasecmp(tok, "DB_WCKey") == 0)
5509 			(*flags_out) |= DEBUG_FLAG_DB_WCKEY;
5510 		else if (xstrcasecmp(tok, "Dependency") == 0)
5511 			(*flags_out) |= DEBUG_FLAG_DEPENDENCY;
5512 		else if (xstrcasecmp(tok, "Elasticsearch") == 0)
5513 			(*flags_out) |= DEBUG_FLAG_ESEARCH;
5514 		else if (xstrcasecmp(tok, "Energy") == 0)
5515 			(*flags_out) |= DEBUG_FLAG_ENERGY;
5516 		else if (xstrcasecmp(tok, "ExtSensors") == 0)
5517 			(*flags_out) |= DEBUG_FLAG_EXT_SENSORS;
5518 		else if (xstrcasecmp(tok, "Federation") == 0)
5519 			(*flags_out) |= DEBUG_FLAG_FEDR;
5520 		else if (xstrcasecmp(tok, "FrontEnd") == 0)
5521 			(*flags_out) |= DEBUG_FLAG_FRONT_END;
5522 		else if (xstrcasecmp(tok, "Gang") == 0)
5523 			(*flags_out) |= DEBUG_FLAG_GANG;
5524 		else if (xstrcasecmp(tok, "Gres") == 0)
5525 			(*flags_out) |= DEBUG_FLAG_GRES;
5526 		else if (xstrcasecmp(tok, "Hetjob") == 0)
5527 			(*flags_out) |= DEBUG_FLAG_HETJOB;
5528 		else if (xstrcasecmp(tok, "Federation") == 0)
5529 			(*flags_out) |= DEBUG_FLAG_FEDR;
5530 		else if (xstrcasecmp(tok, "Interconnect") == 0)
5531 			(*flags_out) |= DEBUG_FLAG_INTERCONNECT;
5532 		else if (xstrcasecmp(tok, "Filesystem") == 0)
5533 			(*flags_out) |= DEBUG_FLAG_FILESYSTEM;
5534 		else if (xstrcasecmp(tok, "JobContainer") == 0)
5535 			(*flags_out) |= DEBUG_FLAG_JOB_CONT;
5536 		else if (xstrcasecmp(tok, "License") == 0)
5537 			(*flags_out) |= DEBUG_FLAG_LICENSE;
5538 		else if (xstrcasecmp(tok, "Network") == 0 ||
5539 			 xstrcasecmp(tok, "Net") == 0)
5540 			(*flags_out) |= DEBUG_FLAG_NET;
5541 		else if (xstrcasecmp(tok, "NO_CONF_HASH") == 0)
5542 			(*flags_out) |= DEBUG_FLAG_NO_CONF_HASH;
5543 		else if (xstrcasecmp(tok, "NodeFeatures") == 0)
5544 			(*flags_out) |= DEBUG_FLAG_NODE_FEATURES;
5545 		else if (xstrcasecmp(tok, "NoRealTime") == 0)
5546 			(*flags_out) |= DEBUG_FLAG_NO_REALTIME;
5547 		else if (xstrcasecmp(tok, "Priority") == 0)
5548 			(*flags_out) |= DEBUG_FLAG_PRIO;
5549 		else if (xstrcasecmp(tok, "Profile") == 0)
5550 			(*flags_out) |= DEBUG_FLAG_PROFILE;
5551 		else if (xstrcasecmp(tok, "Protocol") == 0)
5552 			(*flags_out) |= DEBUG_FLAG_PROTOCOL;
5553 		else if (xstrcasecmp(tok, "Reservation") == 0)
5554 			(*flags_out) |= DEBUG_FLAG_RESERVATION;
5555 		else if (xstrcasecmp(tok, "Route") == 0)
5556 			(*flags_out) |= DEBUG_FLAG_ROUTE;
5557 		else if (xstrcasecmp(tok, "SelectType") == 0)
5558 			(*flags_out) |= DEBUG_FLAG_SELECT_TYPE;
5559 		else if (xstrcasecmp(tok, "Steps") == 0)
5560 			(*flags_out) |= DEBUG_FLAG_STEPS;
5561 		else if (xstrcasecmp(tok, "Switch") == 0)
5562 			(*flags_out) |= DEBUG_FLAG_SWITCH;
5563 		else if (xstrcasecmp(tok, "Task") == 0)
5564 			(*flags_out) |= DEBUG_FLAG_TASK;
5565 		else if (xstrcasecmp(tok, "TraceJobs") == 0)
5566 			(*flags_out) |= DEBUG_FLAG_TRACE_JOBS;
5567 		else if (xstrcasecmp(tok, "TRESNode") == 0)
5568 			(*flags_out) |= DEBUG_FLAG_TRES_NODE;
5569 		else if (xstrcasecmp(tok, "Trigger") == 0)
5570 			(*flags_out) |= DEBUG_FLAG_TRIGGERS;
5571 		else if (xstrcasecmp(tok, "Triggers") == 0)
5572 			(*flags_out) |= DEBUG_FLAG_TRIGGERS;
5573 		else if (xstrcasecmp(tok, "CpuFrequency") == 0)
5574 			(*flags_out) |= DEBUG_FLAG_CPU_FREQ;
5575 		else if (xstrcasecmp(tok, "Power") == 0)
5576 			(*flags_out) |= DEBUG_FLAG_POWER;
5577 		else if (xstrcasecmp(tok, "PowerSave") == 0)
5578 			(*flags_out) |= DEBUG_FLAG_POWER_SAVE;
5579 		else if (xstrcasecmp(tok, "TimeCray") == 0)
5580 			(*flags_out) |= DEBUG_FLAG_TIME_CRAY;
5581 		else if (xstrcasecmp(tok, "WorkQueue") == 0 ||
5582 			 xstrcasecmp(tok, "WorkQ") == 0)
5583 			(*flags_out) |= DEBUG_FLAG_WORKQ;
5584 		else {
5585 			error("Invalid DebugFlag: %s", tok);
5586 			(*flags_out) = 0;
5587 			rc = SLURM_ERROR;
5588 			break;
5589 		}
5590 		tok = strtok_r(NULL, ",", &last);
5591 	}
5592 	xfree(tmp_str);
5593 
5594 	return rc;
5595 }
5596 
5597 /*
5598  * reconfig_flags2str - convert a ReconfFlags uint16_t to the equivalent string
5599  * Keep in sync with reconfig_str2flags() below
5600  */
reconfig_flags2str(uint16_t reconfig_flags)5601 extern char * reconfig_flags2str(uint16_t reconfig_flags)
5602 {
5603 	char *rc = NULL;
5604 
5605 	if (reconfig_flags & RECONFIG_KEEP_PART_INFO) {
5606 		if (rc)
5607 			xstrcat(rc, ",");
5608 		xstrcat(rc, "KeepPartInfo");
5609 	}
5610 	if (reconfig_flags & RECONFIG_KEEP_PART_STAT) {
5611 		if (rc)
5612 			xstrcat(rc, ",");
5613 		xstrcat(rc, "KeepPartState");
5614 	}
5615 
5616 	return rc;
5617 }
5618 
5619 /*
5620  * reconfig_str2flags - Convert a ReconfFlags string to the equivalent uint16_t
5621  * Keep in sync with reconfig_flags2str() above
5622  * Returns NO_VAL if invalid
5623  */
reconfig_str2flags(char * reconfig_flags)5624 extern uint16_t reconfig_str2flags(char *reconfig_flags)
5625 {
5626 	uint16_t rc = 0;
5627 	char *tmp_str, *tok, *last = NULL;
5628 
5629 	if (!reconfig_flags)
5630 		 return rc;
5631 
5632 	tmp_str = xstrdup(reconfig_flags);
5633 	tok = strtok_r(tmp_str, ",", &last);
5634 	while (tok) {
5635 		if (xstrcasecmp(tok, "KeepPartInfo") == 0)
5636 			rc |= RECONFIG_KEEP_PART_INFO;
5637 		else if (xstrcasecmp(tok, "KeepPartState") == 0)
5638 			rc |= RECONFIG_KEEP_PART_STAT;
5639 		else {
5640 			error("Invalid ReconfigFlag: %s", tok);
5641 			rc = NO_VAL16;
5642 			break;
5643 		}
5644 		tok = strtok_r(NULL, ",", &last);
5645 	}
5646 	xfree(tmp_str);
5647 
5648 	return rc;
5649 }
5650 
destroy_config_plugin_params(void * object)5651 extern void destroy_config_plugin_params(void *object)
5652 {
5653 	config_plugin_params_t *plugin_ptr = (config_plugin_params_t *)object;
5654 
5655 	if (plugin_ptr) {
5656 		xfree(plugin_ptr->name);
5657 		FREE_NULL_LIST(plugin_ptr->key_pairs);
5658 		xfree(object);
5659 	}
5660 }
5661 
/*
 * pack_config_plugin_params - pack one config_plugin_params_t into a buffer
 * IN in - config_plugin_params_t pointer (must not be NULL)
 * IN protocol_version - communication protocol version
 * IN/OUT buff - destination buffer
 */
extern void pack_config_plugin_params(void *in, uint16_t protocol_version,
				      Buf buff)
{
	/* Re-indented with tabs for consistency with the rest of the file */
	config_plugin_params_t *object = (config_plugin_params_t *)in;

	packstr(object->name, buff);
	pack_key_pair_list((void *)object->key_pairs, protocol_version, buff);
}
5670 
/*
 * unpack_config_plugin_params - unpack one config_plugin_params_t
 * OUT object - newly allocated config_plugin_params_t on SLURM_SUCCESS
 * IN protocol_version - communication protocol version
 * IN/OUT buff - source buffer
 * RET SLURM_SUCCESS or SLURM_ERROR
 * NOTE(review): on SLURM_ERROR the allocation is freed but *object is left
 *	pointing at the freed memory; callers must not use it on failure.
 */
extern int
unpack_config_plugin_params(void **object, uint16_t protocol_version, Buf buff)
{
	uint32_t uint32_tmp;
	config_plugin_params_t *object_ptr = xmalloc(sizeof(*object_ptr));

	*object = object_ptr;
	/* safe_unpackstr_xmalloc() jumps to unpack_error on failure */
	safe_unpackstr_xmalloc(&object_ptr->name,  &uint32_tmp, buff);

	if (unpack_key_pair_list((void *) &object_ptr->key_pairs,
				 protocol_version, buff) != SLURM_SUCCESS)
		goto unpack_error;

	return SLURM_SUCCESS;

unpack_error:
	destroy_config_plugin_params(object_ptr);
	return SLURM_ERROR;
}
5690 
5691 extern void
pack_config_plugin_params_list(void * in,uint16_t protocol_version,Buf buff)5692 pack_config_plugin_params_list(void *in, uint16_t protocol_version, Buf buff)
5693 {
5694 	uint32_t count = NO_VAL;
5695 
5696 	if (in)
5697 		count = list_count(in);
5698 	pack32(count, buff);
5699 	if (count && (count != NO_VAL))	{
5700 		ListIterator itr = list_iterator_create((List)in);
5701 		config_plugin_params_t *obj = NULL;
5702 		while ((obj = list_next(itr))) {
5703 			pack_config_plugin_params(obj, protocol_version, buff);
5704 		}
5705 		list_iterator_destroy(itr);
5706 	}
5707 }
5708 
5709 extern int
unpack_config_plugin_params_list(void ** plugin_params_l,uint16_t protocol_version,Buf buff)5710 unpack_config_plugin_params_list(void **plugin_params_l,
5711 				 uint16_t protocol_version, Buf buff)
5712 {
5713 	uint32_t count = NO_VAL;
5714 	List tmp_list = NULL;
5715 
5716 	safe_unpack32(&count, buff);
5717 	if (count > NO_VAL)
5718 		goto unpack_error;
5719 	if (count != NO_VAL) {
5720 		tmp_list = list_create(destroy_config_plugin_params);
5721 		config_plugin_params_t *object = NULL;
5722 		int i;
5723 		for (i = 0; i < count; i++) {
5724 			if (unpack_config_plugin_params(
5725 				    (void *)&object, protocol_version, buff)
5726 			    == SLURM_ERROR)
5727 				goto unpack_error;
5728 			list_append(tmp_list, object);
5729 		}
5730 		*plugin_params_l = (void *)tmp_list;
5731 	}
5732 	return SLURM_SUCCESS;
5733 
5734 unpack_error:
5735 	FREE_NULL_LIST(tmp_list);
5736 	return SLURM_ERROR;
5737 }
5738 
destroy_config_key_pair(void * object)5739 extern void destroy_config_key_pair(void *object)
5740 {
5741 	config_key_pair_t *key_pair_ptr = (config_key_pair_t *)object;
5742 
5743 	if (key_pair_ptr) {
5744 		xfree(key_pair_ptr->name);
5745 		xfree(key_pair_ptr->value);
5746 		xfree(key_pair_ptr);
5747 	}
5748 }
5749 
5750 extern void
pack_config_key_pair(void * in,uint16_t protocol_version,Buf buffer)5751 pack_config_key_pair(void *in, uint16_t protocol_version, Buf buffer)
5752 {
5753 	config_key_pair_t *object = (config_key_pair_t *)in;
5754 	packstr(object->name,  buffer);
5755 	packstr(object->value, buffer);
5756 }
5757 
/*
 * unpack_config_key_pair - unpack one name/value pair
 * OUT object - newly allocated config_key_pair_t on SLURM_SUCCESS;
 *	set to NULL on SLURM_ERROR
 * IN protocol_version - communication protocol version (currently unused)
 * IN/OUT buffer - source buffer
 * RET SLURM_SUCCESS or SLURM_ERROR
 */
extern int
unpack_config_key_pair(void **object, uint16_t protocol_version, Buf buffer)
{
	uint32_t uint32_tmp;
	config_key_pair_t *object_ptr = xmalloc(sizeof(config_key_pair_t));

	*object = object_ptr;
	/* safe_unpackstr_xmalloc() jumps to unpack_error on failure */
	safe_unpackstr_xmalloc(&object_ptr->name,  &uint32_tmp, buffer);
	safe_unpackstr_xmalloc(&object_ptr->value, &uint32_tmp, buffer);

	return SLURM_SUCCESS;

unpack_error:
	destroy_config_key_pair(object_ptr);
	*object = NULL;
	return SLURM_ERROR;
}
5775 
5776 extern void
pack_key_pair_list(void * key_pairs,uint16_t protocol_version,Buf buffer)5777 pack_key_pair_list(void *key_pairs, uint16_t protocol_version, Buf buffer)
5778 {
5779 	uint32_t count = NO_VAL;
5780 
5781 	if (key_pairs)
5782 		count = list_count(key_pairs);
5783 	pack32(count, buffer);
5784 	if (count && (count != NO_VAL)) {
5785 		ListIterator itr = list_iterator_create(
5786 			(List)key_pairs);
5787 		config_key_pair_t *key_pair = NULL;
5788 		while ((key_pair = list_next(itr))) {
5789 			pack_config_key_pair(key_pair, protocol_version,
5790 					     buffer);
5791 		}
5792 		list_iterator_destroy(itr);
5793 	}
5794 }
5795 
5796 extern int
unpack_key_pair_list(void ** key_pairs,uint16_t protocol_version,Buf buffer)5797 unpack_key_pair_list(void **key_pairs, uint16_t protocol_version, Buf buffer)
5798 {
5799 	uint32_t count = NO_VAL;
5800 	List tmp_list = NULL;
5801 
5802 	safe_unpack32(&count, buffer);
5803 	if (count > NO_VAL)
5804 		goto unpack_error;
5805 	if (count != NO_VAL) {
5806 		tmp_list = list_create(destroy_config_key_pair);
5807 		config_key_pair_t *object = NULL;
5808 		int i;
5809 		for (i = 0; i < count; i++) {
5810 			if (unpack_config_key_pair((void *)&object,
5811 						   protocol_version, buffer)
5812 			    == SLURM_ERROR)
5813 				goto unpack_error;
5814 			list_append(tmp_list, object);
5815 		}
5816 		*key_pairs = (void *)tmp_list;
5817 	}
5818 	return SLURM_SUCCESS;
5819 
5820 unpack_error:
5821 	FREE_NULL_LIST(tmp_list);
5822 	return SLURM_ERROR;
5823 }
5824 
sort_key_pairs(void * v1,void * v2)5825 extern int sort_key_pairs(void *v1, void *v2)
5826 {
5827 	config_key_pair_t *key_a = *(config_key_pair_t **)v1;
5828 	config_key_pair_t *key_b = *(config_key_pair_t **)v2;
5829 
5830 	int size_a = xstrcmp(key_a->name, key_b->name);
5831 
5832 	if (size_a < 0)
5833 		return -1;
5834 	else if (size_a > 0)
5835 		return 1;
5836 
5837 	return 0;
5838 }
5839 
5840 /*
5841  * Return the pathname of the extra .conf file
5842  */
get_extra_conf_path(char * conf_name)5843 extern char *get_extra_conf_path(char *conf_name)
5844 {
5845 	char *val = getenv("SLURM_CONF");
5846 	char *rc = NULL, *slash;
5847 
5848 	if (!val)
5849 		val = default_slurm_config_file;
5850 
5851 	/*
5852 	 * Both plugstack.conf and topology.conf need special handling in
5853 	 * "configless" operation as client commands will need to load them.
5854 	 */
5855 
5856 	if (plugstack_conf && !xstrcmp(conf_name, "plugstack.conf"))
5857 		return xstrdup(plugstack_conf);
5858 
5859 	if (topology_conf && !xstrcmp(conf_name, "topology.conf"))
5860 		return xstrdup(topology_conf);
5861 
5862 	/* Replace file name on end of path */
5863 	rc = xstrdup(val);
5864 	if ((slash = strrchr(rc, '/')))
5865 		slash[1] = '\0';
5866 	else
5867 		rc[0] = '\0';
5868 	xstrcat(rc, conf_name);
5869 
5870 	return rc;
5871 }
5872 
5873 /*
5874  * Add nodes and corresponding pre-configured slurm_addr_t's to node conf hash
5875  * tables.
5876  *
5877  * IN node_list - node_list allocated to job
5878  * IN node_addrs - array of slurm_addr_t that corresponds to nodes built from
5879  * 	host_list. See build_node_details().
5880  * RET return SLURM_SUCCESS on success, SLURM_ERROR otherwise.
5881  */
extern int add_remote_nodes_to_conf_tbls(char *node_list,
					 slurm_addr_t *node_addrs)
{
	char *hostname       = NULL;
	hostlist_t host_list = NULL;
	int i = 0;

	xassert(node_list);
	xassert(node_addrs);

	if ((host_list = hostlist_create(node_list)) == NULL) {
		error("hostlist_create error for %s: %m",
		      node_list);
		return SLURM_ERROR;
	}

	/*
	 * flush tables since clusters could share the same nodes names.
	 * Leave nodehash_initialized set so that the tables don't get
	 * overridden later
	 */
	_free_name_hashtbl();
	nodehash_initialized = true;

	/*
	 * Register each expanded hostname with its pre-resolved address;
	 * node_addrs[i] is assumed to parallel the hostlist expansion
	 * order (see build_node_details() per the header comment).
	 */
	while ((hostname = hostlist_shift(host_list))) {
		_push_to_hashtbls(hostname, hostname,
				  NULL, NULL, 0, 0,
				  0, 0, 0, 0, false, NULL, 0,
				  0, &node_addrs[i++], true);
		/* hostlist_shift() returns malloc'd memory -> free(), not
		 * xfree() */
		free(hostname);
	}

	hostlist_destroy(host_list);

	return SLURM_SUCCESS;
}
5918 
5919 /*
5920  * Get result of configuration file test.
5921  * RET SLURM_SUCCESS or error code
5922  */
extern int config_test_result(void)
{
	/* Accumulated by the test-mode error paths; see config_test_start() */
	return local_test_config_rc;
}
5927 
5928 
5929 /*
5930  * Start configuration file test mode. Disables fatal errors.
5931  */
config_test_start(void)5932 extern void config_test_start(void)
5933 {
5934 	lvl = LOG_LEVEL_ERROR;
5935 	local_test_config_rc = 0;
5936 }
5937