#if HAVE_CONFIG_H
#   include "config.h"
#endif

/* $Header: /tmp/hpctools/ga/tcgmsg/ipcv4.0/cluster.c,v 1.11 2004-04-01 02:04:56 manoj Exp $ */

#include <stdio.h>
#include <stdlib.h>

#ifdef SEQUENT
#include <strings.h>
#else
#include <string.h>
#endif

#include "sndrcvP.h"
#include "defglobals.h"

#if defined(ALLIANT) || defined(ENCORE) || defined(SEQUENT) || defined(AIX)  \
                     || defined(CONVEX) || defined(ARDENT) || defined(ULTRIX) \
                     || defined(NEXT)
extern char *strdup();
extern char *strtok();
#endif

extern void Error();

void InitClusInfoNotParallel()
{
    int clus = 0;   /* index of the single local cluster; the global
                       SR_n_clus itself is set in InitGlobal() */

    SR_clus_info[clus].user = "?";
    SR_clus_info[clus].hostname = "?";
    SR_clus_info[clus].nslave = 1;
    SR_clus_info[clus].image = "?";
    SR_clus_info[clus].workdir = "?";
    SR_clus_info[clus].masterid = 0;
}

void InitClusInfo(procgrp, masterhostname)
     char *procgrp, *masterhostname;
/*
  Initialize the SR_clus_info structure, SR_n_clus and SR_n_proc
  by parsing the PROCGRP info.

  The procgrp file consists of whitespace-separated records of the form:

      user host nslave image workdir

  masterhostname is the name of the host running the parallel command.

  This routine could do with some more error checking.
*/
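/*
  For illustration only, a hypothetical two-cluster PROCGRP (the user,
  hosts and paths below are made up):

      fred  node0  4  /home/fred/bin/a.out  /scratch/fred
      fred  node1  2  /home/fred/bin/a.out  /scratch/fred

  Parsing this would leave SR_n_clus = 2 and SR_n_proc = 6: processes
  0-3 belong to the cluster on node0 (masterid 0) and processes 4-5 to
  the cluster on node1 (masterid 4).
*/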
{
  char *user, *host, *nslave, *image, *workdir;
  char *white = " \t\n";
  char *buf = strdup(procgrp);  /* strtok() modifies its argument, so parse a copy */
  char *tmp = buf;              /* buf keeps the base pointer so the copy can be freed */
  int i;

  SR_n_clus = 0;
  SR_n_proc = 0;

  if (!buf) Error("InitClusInfo: no memory", 0L);

  while (1) {
    user = strtok(tmp, white);
    tmp = (char *) NULL;   /* NULL makes subsequent strtok() calls continue on the same string */
    if (user == (char *) NULL)
      break;
    host = strtok(tmp, white);
    nslave = strtok(tmp, white);
    image = strtok(tmp, white);
    workdir = strtok(tmp, white);
    if (workdir == (char *) NULL)
      Error("InitClusInfo: error parsing PROCGRP, line=", (long) (SR_n_clus+1));

    if (SR_n_clus == MAX_CLUSTER)
      Error("InitClusInfo: maximum no. of clusters exceeded",
            (long) MAX_CLUSTER);

    if (atoi(nslave) > MAX_SLAVE)
      Error("InitClusInfo: maximum no. of slaves per cluster exceeded",
            (long) MAX_SLAVE);

    SR_clus_info[SR_n_clus].user = strdup(user);
    SR_clus_info[SR_n_clus].hostname = strdup(host);
    SR_clus_info[SR_n_clus].nslave = atoi(nslave);
    SR_clus_info[SR_n_clus].image = strdup(image);
    SR_clus_info[SR_n_clus].workdir = strdup(workdir);
    SR_clus_info[SR_n_clus].masterid = SR_n_proc;

    if (!SR_clus_info[SR_n_clus].user || !SR_clus_info[SR_n_clus].hostname ||
        !SR_clus_info[SR_n_clus].image || !SR_clus_info[SR_n_clus].workdir)
      Error("InitClusInfo: no memory 2", 0L);

    if (SR_n_proc + SR_clus_info[SR_n_clus].nslave >= MAX_PROCESS)
      Error("InitClusInfo: maximum no. of processes exceeded",
            (long) MAX_PROCESS);

    for (i=0; i<SR_clus_info[SR_n_clus].nslave; i++)
      SR_proc_info[SR_n_proc+i].clusid = SR_n_clus;

    SR_n_proc += SR_clus_info[SR_n_clus].nslave;
    SR_n_clus++;
  }

  /* Define info about the parallel command process */
  SR_proc_info[SR_n_proc].clusid   = SR_n_clus;
  SR_clus_info[SR_n_clus].hostname = strdup(masterhostname);
  SR_clus_info[SR_n_clus].user     = "?";
  SR_clus_info[SR_n_clus].workdir  = "?";
  SR_clus_info[SR_n_clus].image    = "parallel";
  if (!SR_clus_info[SR_n_clus].hostname)
    Error("InitClusInfo: no memory 3", 0L);

  free(buf);   /* tmp is NULL by now; free the copy made for strtok() */
}

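/*
  A sketch of the intended calling sequence, for orientation only (the
  real driver lives in the TCGMSG startup code, and "parallel" and
  "procgrp_buf" below are illustrative names, not identifiers from this
  package):

      InitGlobal();                               -- reset all globals
      if (parallel)
        InitClusInfo(procgrp_buf, masterhostname);
      else
        InitClusInfoNotParallel();
      PrintClusInfo();                            -- optional debug dump
*/
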
void PrintClusInfo()
{
  long i, clus_to_print;

  /* In a parallel run there is one extra pseudo-cluster describing the
     parallel command process itself */
  clus_to_print = SR_parallel ? SR_n_clus+1 : SR_n_clus;

  printf("No. Clusters: %ld\n", (long) SR_n_clus);
  for (i=0; i<clus_to_print; i++)
    (void) printf("Cluster %ld {\n  user = %s\n  host = %s\n  nslave = %ld\n"
                  "  image = %s\n  workdir = %s\n  masterid = %ld}\n",
                  i,
                  SR_clus_info[i].user,
                  SR_clus_info[i].hostname,
                  SR_clus_info[i].nslave,
                  SR_clus_info[i].image,
                  SR_clus_info[i].workdir,
                  SR_clus_info[i].masterid);
  printf("SR_clus_info = %p size=%d\n", (void *) SR_clus_info,
         (int) sizeof(struct cluster_info_struct));
  (void) fflush(stdout);
}

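/*
  Illustrative output for the hypothetical two-cluster PROCGRP sketched
  above InitClusInfo() (values are made up):

      No. Clusters: 2
      Cluster 0 {
        user = fred
        host = node0
        nslave = 4
        image = /home/fred/bin/a.out
        workdir = /scratch/fred
        masterid = 0}
      ...
*/
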
void InitGlobal()
/*
  Initialize all the globals to something appropriate
*/
{
  long i;

  SR_n_clus = 1;
  SR_n_proc = 1;

  SR_clus_id = 0;
  SR_proc_id = 0;

  SR_debug = FALSE;

  SR_exit_on_error = TRUE;

  SR_error = FALSE;

  SR_numchild = 0;
  for (i=0; i<MAX_SLAVE; i++)
    SR_pids[i] = 0;

  for (i=0; i<MAX_CLUSTER; i++) {
    SR_clus_info[i].user = (char *) NULL;
    SR_clus_info[i].hostname = (char *) NULL;
    SR_clus_info[i].nslave = 0;
    SR_clus_info[i].image = (char *) NULL;
    SR_clus_info[i].workdir = (char *) NULL;
    SR_clus_info[i].masterid = 0;
  }

  for (i=0; i<MAX_PROCESS; i++) {
    SR_proc_info[i].clusid = 0;
    SR_proc_info[i].slaveid = 0;
    SR_proc_info[i].local = 0;
    SR_proc_info[i].sock = -1;
    SR_proc_info[i].shmem = (char *) NULL;
    SR_proc_info[i].shmem_size = 0;
    SR_proc_info[i].shmem_id = -1;
    SR_proc_info[i].buffer = (char *) NULL;
    SR_proc_info[i].buflen = 0;
    SR_proc_info[i].header = (MessageHeader *) 0;
    SR_proc_info[i].semid = -1;
    SR_proc_info[i].sem_pend = -1;
    SR_proc_info[i].sem_read = -1;
    SR_proc_info[i].sem_written = -1;
    SR_proc_info[i].n_rcv = 0;
    SR_proc_info[i].nb_rcv = 0;
    SR_proc_info[i].t_rcv = 0;
    SR_proc_info[i].n_snd = 0;
    SR_proc_info[i].nb_snd = 0;
    SR_proc_info[i].t_snd = 0;
    SR_proc_info[i].peeked = FALSE;
  }
}