// clang-format off
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   https://www.lammps.org/, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */

#include "omp_compat.h"
#include "dump_cfg_mpiio.h"

#include "atom.h"
#include "domain.h"
#include "update.h"
#include "memory.h"
#include "error.h"

#include <cmath>
#include <cstring>

#ifdef LMP_USER_IO_TIMER
#include <sys/times.h>
#include <hwi/include/bqc/A2_inlines.h>
long dumpCFGTimestamps[10];
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

using namespace LAMMPS_NS;

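// sizing constants for the CFG text header buffer and the per-thread
// string buffers used by convert_string_omp()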
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096

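// UNWRAPEXPAND = box expansion factor applied to unwrapped coords
// (see the scale-factor comment in write_header)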
#define UNWRAPEXPAND 10.0
#define ONEFIELD 32
#define DELTA 1048576

/* ---------------------------------------------------------------------- */

DumpCFGMPIIO::DumpCFGMPIIO(LAMMPS *lmp, int narg, char **arg) :
  DumpCFG(lmp, narg, arg) {}

/* ---------------------------------------------------------------------- */

DumpCFGMPIIO::~DumpCFGMPIIO()
{
  if (multifile == 0) MPI_File_close(&mpifh);
}

/* ---------------------------------------------------------------------- */

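// open one shared dump file with MPI-IO and pre-size it to hold the header
// plus the summed per-rank data, so each rank can write at its own offset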
void DumpCFGMPIIO::openfile()
{
  if (singlefile_opened) { // single file already opened, so just return after resetting filesize
    mpifo = currentFileSize;
    MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
    currentFileSize = mpifo+headerSize+sumFileSize;
    return;
  }
  if (multifile == 0) singlefile_opened = 1;

  // if one file per timestep, replace '*' with current timestep

  filecurrent = filename;

  if (multifile) {
    char *filestar = filecurrent;
    filecurrent = new char[strlen(filestar) + 16];
    char *ptr = strchr(filestar,'*');
    *ptr = '\0';
    if (padflag == 0)
      sprintf(filecurrent,"%s" BIGINT_FORMAT "%s",
              filestar,update->ntimestep,ptr+1);
    else {
      char bif[8],pad[16];
      strcpy(bif,BIGINT_FORMAT);
      sprintf(pad,"%%s%%0%d%s%%s",padflag,&bif[1]);
      sprintf(filecurrent,pad,filestar,update->ntimestep,ptr+1);
    }
    *ptr = '*';
    if (maxfiles > 0) {
      if (numfiles < maxfiles) {
        nameslist[numfiles] = new char[strlen(filecurrent)+1];
        strcpy(nameslist[numfiles],filecurrent);
        ++numfiles;
      } else {
        remove(nameslist[fileidx]);
        delete[] nameslist[fileidx];
        nameslist[fileidx] = new char[strlen(filecurrent)+1];
        strcpy(nameslist[fileidx],filecurrent);
        fileidx = (fileidx + 1) % maxfiles;
      }
    }
  }

  if (append_flag) { // append open
    int err = MPI_File_open(world, filecurrent,
                            MPI_MODE_CREATE | MPI_MODE_APPEND | MPI_MODE_WRONLY,
                            MPI_INFO_NULL, &mpifh);
    if (err != MPI_SUCCESS) {
      char str[128];
      sprintf(str,"Cannot open dump file %s",filecurrent);
      error->one(FLERR,str);
    }
    int myrank;
    MPI_Comm_rank(world,&myrank);
    if (myrank == 0)
      MPI_File_get_size(mpifh,&mpifo);
    MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
    MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
    currentFileSize = mpifo+headerSize+sumFileSize;

  } else { // replace open
    int err = MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY,
                            MPI_INFO_NULL, &mpifh);
    if (err != MPI_SUCCESS) {
      char str[128];
      sprintf(str,"Cannot open dump file %s",filecurrent);
      error->one(FLERR,str);
    }
    mpifo = 0;

    MPI_File_set_size(mpifh,(MPI_Offset) (headerSize+sumFileSize));
    currentFileSize = (headerSize+sumFileSize);
  }
}

/* ---------------------------------------------------------------------- */

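// a snapshot is written in two passes: the estimate pass formats all output
// and computes per-rank sizes and offsets, then the write pass opens the
// pre-sized file and writes the header and data collectively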
void DumpCFGMPIIO::write()
{

#ifdef LMP_USER_IO_TIMER
  long startTimeBase, endTimeBase;
  MPI_Barrier(world); // timestamp barrier
  if (me == 0)
    startTimeBase = GetTimeBase();
#endif

  if (domain->triclinic == 0) {
    boxxlo = domain->boxlo[0];
    boxxhi = domain->boxhi[0];
    boxylo = domain->boxlo[1];
    boxyhi = domain->boxhi[1];
    boxzlo = domain->boxlo[2];
    boxzhi = domain->boxhi[2];
  } else {
    boxxlo = domain->boxlo_bound[0];
    boxxhi = domain->boxhi_bound[0];
    boxylo = domain->boxlo_bound[1];
    boxyhi = domain->boxhi_bound[1];
    boxzlo = domain->boxlo_bound[2];
    boxzhi = domain->boxhi_bound[2];
    boxxy = domain->xy;
    boxxz = domain->xz;
    boxyz = domain->yz;
  }

  // nme = # of dump lines this proc contributes to dump

  nme = count();

  // ntotal = total # of dump lines in snapshot
  // nmax = max # of dump lines on any proc

  bigint bnme = nme;
  MPI_Allreduce(&bnme,&ntotal,1,MPI_LMP_BIGINT,MPI_SUM,world);

  int nmax;
  MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);

  // nheader = total # of dump lines in snapshot, written into the file header

  bigint nheader = ntotal;

  // limit nmax*size_one to int since it is used as a count in MPI calls
  // pack my data into buf
  // if sorting on IDs also request ID list from pack()
  // sort buf as needed

  if (nmax > maxbuf) {
    if ((bigint) nmax * size_one > MAXSMALLINT)
      error->all(FLERR,"Too much per-proc info for dump");
    maxbuf = nmax;
    memory->destroy(buf);
    memory->create(buf,(maxbuf*size_one),"dump:buf");
  }
  if (sort_flag && sortcol == 0 && nmax > maxids) {
    maxids = nmax;
    memory->destroy(ids);
    memory->create(ids,maxids,"dump:ids");
  }

  if (sort_flag && sortcol == 0) pack(ids);
  else pack(nullptr);
  if (sort_flag) sort();

  // estimate pass: determine how much data will be written so the file
  // size can be set, and preprocess the output prior to writing

  performEstimate = 1;
  write_header(nheader);
  write_data(nme,buf);
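
  // the MPI_Scan in the estimate pass leaves the summed string size on the
  // last rank; broadcast it so all ranks can size the file in openfile()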
  MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs-1), world);

#ifdef LMP_USER_IO_TIMER
  MPI_Barrier(world); // timestamp barrier
  dumpCFGTimestamps[0] = GetTimeBase();
#endif

  openfile();

#ifdef LMP_USER_IO_TIMER
  MPI_Barrier(world); // timestamp barrier
  dumpCFGTimestamps[1] = GetTimeBase();
#endif

  performEstimate = 0;
  write_header(nheader); // mpifo now points to end of header info

#ifdef LMP_USER_IO_TIMER
  MPI_Barrier(world); // timestamp barrier
  dumpCFGTimestamps[2] = GetTimeBase();
#endif

  // now actually write the data

  write_data(nme,buf);

#ifdef LMP_USER_IO_TIMER
  MPI_Barrier(world); // timestamp barrier
  dumpCFGTimestamps[3] = GetTimeBase();
#endif

  if (multifile) {
    MPI_File_close(&mpifh);
    delete[] filecurrent;
  }

#ifdef LMP_USER_IO_TIMER
  MPI_Barrier(world); // timestamp barrier
  dumpCFGTimestamps[4] = GetTimeBase();
  if (me == 0) {
    endTimeBase = GetTimeBase();
    printf("total dump cycles: %ld - estimates and setup: %ld openfile: %ld "
           "write header: %ld write data: %ld close file: %ld\n",
           (long) (endTimeBase-startTimeBase),
           (long) (dumpCFGTimestamps[0]-startTimeBase),
           (long) (dumpCFGTimestamps[1]-dumpCFGTimestamps[0]),
           (long) (dumpCFGTimestamps[2]-dumpCFGTimestamps[1]),
           (long) (dumpCFGTimestamps[3]-dumpCFGTimestamps[2]),
           (long) (dumpCFGTimestamps[4]-dumpCFGTimestamps[3]));
  }
#endif

}

/* ---------------------------------------------------------------------- */

void DumpCFGMPIIO::init_style()
{
  if (multifile == 0 && !multifile_override)
    error->all(FLERR,"Dump cfg requires one snapshot per file");

  DumpCustom::init_style();

  // setup function ptrs

  write_choice = &DumpCFGMPIIO::write_string;
}

/* ---------------------------------------------------------------------- */

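// estimate pass: format the CFG header into headerBuffer and accumulate
// headerSize; write pass: rank 0 writes the buffer at mpifo and all ranks
// advance mpifo past the header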
void DumpCFGMPIIO::write_header(bigint n)
{
  // set scale factor used by AtomEye for CFG viz
  // default = 1.0
  // for peridynamics, set to pre-computed PD scale factor
  //   so PD particles mimic C atoms
  // for unwrapped coords, set to UNWRAPEXPAND (10.0)
  //   so molecules are not split across periodic box boundaries

  if (performEstimate) {

    headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);

    headerSize = 0;

    double scale = 1.0;
    if (atom->peri_flag) scale = atom->pdscale;
    else if (unwrapflag == 1) scale = UNWRAPEXPAND;

    char str[64];

    sprintf(str,"Number of particles = %s\n",BIGINT_FORMAT);
    headerSize += sprintf(&headerBuffer[headerSize],str,n);
    headerSize += sprintf(&headerBuffer[headerSize],"A = %g Angstrom (basic length-scale)\n",scale);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(1,1) = %g A\n",domain->xprd);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(1,2) = 0 A \n");
    headerSize += sprintf(&headerBuffer[headerSize],"H0(1,3) = 0 A \n");
    headerSize += sprintf(&headerBuffer[headerSize],"H0(2,1) = %g A \n",domain->xy);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(2,2) = %g A\n",domain->yprd);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(2,3) = 0 A \n");
    headerSize += sprintf(&headerBuffer[headerSize],"H0(3,1) = %g A \n",domain->xz);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(3,2) = %g A \n",domain->yz);
    headerSize += sprintf(&headerBuffer[headerSize],"H0(3,3) = %g A\n",domain->zprd);
    headerSize += sprintf(&headerBuffer[headerSize],".NO_VELOCITY.\n");
    headerSize += sprintf(&headerBuffer[headerSize],"entry_count = %d\n",nfield-2);
    for (int i = 0; i < nfield-5; i++)
      headerSize += sprintf(&headerBuffer[headerSize],"auxiliary[%d] = %s\n",i,auxname[i]);
  }
  else { // write data

    if (me == 0)
      MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
    mpifo += headerSize;
    free(headerBuffer);
  }
}

#if defined(_OPENMP)

/* ----------------------------------------------------------------------
   convert mybuf of doubles to one big formatted string in sbuf
   return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */

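// rows of mybuf are split evenly across the OpenMP threads; each thread
// formats its share into a private string buffer and the buffers are
// concatenated into sbuf in thread order afterwards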
int DumpCFGMPIIO::convert_string_omp(int n, double *mybuf)
{
  char **mpifh_buffer_line_per_thread;
  int mpifhStringCount;
  int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;

  mpifhStringCount = 0;

  int nthreads = omp_get_max_threads();
  if (nthreads > n) { // too few rows to split across threads; call serial version
    mpifhStringCount = convert_string(n,mybuf);
  }
  else {
    memory->create(mpifhStringCountPerThread,nthreads,"dump:mpifhStringCountPerThread");
    mpifh_buffer_line_per_thread = (char **) malloc(nthreads*sizeof(char*));
    memory->create(bufOffset,nthreads,"dump:bufOffset");
    memory->create(bufRange,nthreads,"dump:bufRange");
    memory->create(bufLength,nthreads,"dump:bufLength");

    int i=0;
    for (i=0;i<(nthreads-1);i++) {
      mpifhStringCountPerThread[i] = 0;
      bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
      bufRange[i] = (int)(floor((double)n/(double)nthreads));
      bufLength[i] = DUMP_BUF_CHUNK_SIZE;
      mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
      mpifh_buffer_line_per_thread[i][0] = '\0';
    }
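    // the last thread also takes the remainder rows so all n are covered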
    mpifhStringCountPerThread[i] = 0;
    bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
    bufRange[i] = n-(i*(int)(floor((double)n/(double)nthreads)));
    bufLength[i] = DUMP_BUF_CHUNK_SIZE;
    mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
    mpifh_buffer_line_per_thread[i][0] = '\0';

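    // each thread formats its slice into its private buffer, growing the
    // buffer whenever fewer than DUMP_BUF_INCREMENT_SIZE bytes remain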
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, mpifhStringCountPerThread, mpifh_buffer_line_per_thread, mybuf)
    {
      int tid = omp_get_thread_num();
      int m=0;

      if (unwrapflag == 0) {
        for (int i = 0; i < bufRange[tid]; i++) {
          if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
            mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
            bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
          }
          for (int j = 0; j < size_one; j++) {
            if (j == 0) {
              mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",(mybuf[bufOffset[tid]+m]));
            } else if (j == 1) {
              mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
            } else if (j >= 2) {
              if (vtype[j] == Dump::INT)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
              else if (vtype[j] == Dump::DOUBLE)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
              else if (vtype[j] == Dump::STRING)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
              else if (vtype[j] == Dump::BIGINT)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
            }
            m++;
          } // for j
          mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
        } // for i
      } // wrap flag
      else if (unwrapflag == 1) {
        for (int i = 0; i < bufRange[tid]; i++) {
          if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
            mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
            bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
          }
          for (int j = 0; j < size_one; j++) {
            double unwrap_coord;
            if (j == 0) {
              mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",mybuf[bufOffset[tid]+m]);
            } else if (j == 1) {
              mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
            } else if (j >= 2 && j <= 4) {
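              // undo the UNWRAPEXPAND scaling about the box center to
              // recover the reduced coordinate AtomEye expects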
              unwrap_coord = (mybuf[bufOffset[tid]+m] - 0.5)/UNWRAPEXPAND + 0.5;
              mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],unwrap_coord);
            } else if (j >= 5) {
              if (vtype[j] == Dump::INT)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
              else if (vtype[j] == Dump::DOUBLE)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
              else if (vtype[j] == Dump::STRING)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
              else if (vtype[j] == Dump::BIGINT)
                mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
            }
            m++;
          } // for j
          mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
        } // for i
      } // unwrap flag
    } // pragma omp parallel, with implicit barrier at its close

    mpifhStringCount = 0;
    for (i=0;i<nthreads;i++) {
      mpifhStringCount += mpifhStringCountPerThread[i];
    }

    memory->destroy(bufOffset);
    memory->destroy(bufRange);
    memory->destroy(bufLength);

    if (mpifhStringCount > 0) {
      if (mpifhStringCount > maxsbuf) {
        if (mpifhStringCount > MAXSMALLINT) return -1;
        maxsbuf = mpifhStringCount+1;
        memory->grow(sbuf,maxsbuf,"dump:sbuf");
      }
      sbuf[0] = '\0';
    }

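    // stitch the per-thread strings together in thread order so the result
    // matches what the serial convert_string() would produce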
    for (int i=0;i<nthreads;i++) {
      strcat(sbuf,mpifh_buffer_line_per_thread[i]);
      free(mpifh_buffer_line_per_thread[i]);
    }

    memory->destroy(mpifhStringCountPerThread);
    free(mpifh_buffer_line_per_thread);

  } // else omp

  return mpifhStringCount;
}

#endif

/* ---------------------------------------------------------------------- */

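// write_data() runs twice per snapshot via the write_choice function ptr:
// once with performEstimate set to size the output, then for the real write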
void DumpCFGMPIIO::write_data(int n, double *mybuf)
{
  (this->*write_choice)(n,mybuf);
}

/* ---------------------------------------------------------------------- */

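// estimate pass: format this rank's atoms into sbuf, then MPI_Scan the
// string lengths to get the summed file size and this rank's byte offset
// past the header; write pass: collectively write sbuf at that offset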
void DumpCFGMPIIO::write_string(int n, double *mybuf)
{
  if (performEstimate) {

#if defined(_OPENMP)
    int nthreads = omp_get_max_threads();
    if (nthreads > 1)
      nsme = convert_string_omp(n,mybuf);
    else
      nsme = convert_string(n,mybuf);
#else
    nsme = convert_string(n,mybuf);
#endif
    bigint incPrefix = 0;
    bigint bigintNsme = (bigint) nsme;
    MPI_Scan(&bigintNsme,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
    sumFileSize = (incPrefix*sizeof(char));
    offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
  }
  else {
    MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
    if (flush_flag)
      MPI_File_sync(mpifh);
  }
}