/*****************************************************************************

  Ver. 3.8 (21/Oct./2016)

  OpenMX (Open source package for Material eXplorer) is a program package
  for linear scaling density functional calculations of large-scale materials.
  Almost all time-consuming parts can be performed in O(N) operations, where N
  is the number of atoms (or basis orbitals). Thus, the program might be useful
  for studies of large-scale materials.
  The distribution of this program package follows the practice of
  the GNU General Public License (GPL).

  OpenMX is based on

   *  Local density and generalized gradient approximations (LDA, LSDA, GGA)
      to the exchange-correlation term
   *  Norm-conserving pseudopotentials
   *  Variationally optimized pseudo-atomic basis orbitals
   *  Solution of Poisson's equation using FFT
   *  Evaluation of two-center integrals using Fourier transformation
   *  Evaluation of three-center integrals using fixed real-space grids
   *  Simple mixing, direct inversion in the iterative subspace (DIIS),
      and the guaranteed-reduction Pulay method for SCF calculations
   *  Solution of the eigenvalue problem using O(N) methods
   *  ...

  See also our website (http://www.openmx-square.org/)
  for recent developments.


    **************************************************************
     Copyright

     Taisuke Ozaki

     Present (21/Oct./2016) official address

     Institute for Solid State Physics, University of Tokyo,
     Kashiwanoha 5-1-5, Kashiwa, Chiba 277-8581, Japan

     e-mail: t-ozaki@issp.u-tokyo.ac.jp
    **************************************************************

*****************************************************************************/

/**********************************************************************
  openmx.c:

     openmx.c is the main routine of OpenMX.

  Log of openmx.c:

     5/Oct/2003  Released by T.Ozaki

***********************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
/*  stat section */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
/*  end stat section */
#include "openmx_common.h"
#include "mpi.h"
#include <omp.h>

#include "tran_prototypes.h"
#include "tran_variables.h"


int main(int argc, char *argv[])
{
  static int numprocs,myid;
  static int MD_iter,i,j,po,ip;
  static char fileMemory[YOUSO10];
  double TStime,TEtime;

  /* MPI initialize */

  mpi_comm_level1 = MPI_COMM_WORLD;
  MPI_COMM_WORLD1 = MPI_COMM_WORLD;

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
  NUMPROCS_MPI_COMM_WORLD = numprocs;
  MYID_MPI_COMM_WORLD = myid;
  Num_Procs = numprocs;

  /* for measuring elapsed time */

  dtime(&TStime);

  /* check argv */

  if (argc==1){
    printf("\nCould not find an input file.\n\n");
    MPI_Finalize();
    exit(0);
  }

  /* initialize Runtest_flag */

  Runtest_flag = 0;

  /****************************************************
    ./openmx -nt #

    specifies the number of threads used in the OpenMP
    parallelization
  ****************************************************/
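  /* Illustrative usage (hypothetical input file name): a hybrid MPI/OpenMP
     run with 4 MPI processes and 2 OpenMP threads per process might look like

       mpirun -np 4 ./openmx input.dat -nt 2

     If -nt is not given, the default of one thread per process set below
     is used. */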

  openmp_threads_num = 1; /* default */

  po = 0;
  if (myid==Host_ID){
    for (i=0; i<argc; i++){
      if ( strcmp(argv[i],"-nt")==0 ){
        po = 1;
        ip = i;
      }
    }
  }

  MPI_Bcast(&po, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);
  MPI_Bcast(&ip, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);

  if ( (argc-1)<(ip+1) ){
    if (myid==Host_ID){
      printf("cannot find the number of threads\n");
    }
    MPI_Finalize();
    exit(0);
  }

  if ( po==1 ){
    openmp_threads_num = atoi(argv[ip+1]);

    if (openmp_threads_num<=0){
      if (myid==Host_ID){
        printf("check the number of threads\n");
      }
      MPI_Finalize();
      exit(0);
    }

  }

  omp_set_num_threads(openmp_threads_num);

  if (myid==Host_ID){
    printf("\nThe number of threads in each node for OpenMP parallelization is %d.\n\n",openmp_threads_num);
  }

  /****************************************************
    ./openmx -show directory

    shows the PAO and VPS files used by the input files
    stored in "directory".
  ****************************************************/
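  /* Illustrative usage (hypothetical directory name):

       ./openmx -show ./work

     lists the PAO and VPS files referenced by the input files found in
     ./work. */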

  if (strcmp(argv[1],"-show")==0){
    Show_DFT_DATA(argv);
    exit(1);
  }

  /****************************************************
    ./openmx -maketest

    generates *.out files used to check whether OpenMX
    runs correctly on various platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketest")==0){
    Maketest("S",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -runtest

   checks whether OpenMX runs correctly on your platform
   by comparing the stored *.out files with the *.out
   files generated on your machine.
  ****************************************************/
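  /* Illustrative usage (a sketch; it assumes the reference *.out files
     distributed with OpenMX are available):

       mpirun -np 2 ./openmx -runtest

     Each test input is run and the generated *.out is compared with the
     stored reference. */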

  if ( strcmp(argv[1],"-runtest")==0){
    Runtest("S",argc,argv);
  }

  /****************************************************
   ./openmx -maketestL

    generates *.out files used to check whether OpenMX
    runs correctly for relatively large systems on
    various platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestL")==0){
    Maketest("L",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -maketestL2

    generates *.out files used to check whether OpenMX
    runs correctly for large systems on various
    platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestL2")==0){
    Maketest("L2",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -runtestL

   checks whether OpenMX runs correctly for relatively
   large systems on your platform by comparing the stored
   *.out files with the *.out files generated on your
   machine.
  ****************************************************/

  if (strcmp(argv[1],"-runtestL")==0){
    Runtest("L",argc,argv);
  }

  /****************************************************
   ./openmx -runtestL2

   checks whether OpenMX runs correctly for large systems
   on your platform by comparing the stored *.out files
   with the *.out files generated on your machine.
  ****************************************************/

  if (strcmp(argv[1],"-runtestL2")==0){
    Runtest("L2",argc,argv);
  }

  /*******************************************************
   check for memory leaks by monitoring the actual memory
   usage
  *******************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-mltest")==0){
    Memory_Leak_test(argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -maketestG

    generates *.out files used to check whether OpenMX
    runs correctly for geometry optimization on various
    platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestG")==0){
    Maketest("G",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -maketestC

    generates *.out files used to check whether OpenMX
    runs correctly for simultaneous optimization of the
    cell and geometry on various platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestC")==0){
    Maketest("C",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -runtestG

   checks whether OpenMX runs correctly for geometry
   optimization on your platform by comparing the stored
   *.out files with the *.out files generated on your
   machine.
  ****************************************************/

  if (strcmp(argv[1],"-runtestG")==0){
    Runtest("G",argc,argv);
  }

  /****************************************************
   ./openmx -runtestC

   checks whether OpenMX runs correctly for simultaneous
   optimization of the cell and geometry on your platform
   by comparing the stored *.out files with the *.out
   files generated on your machine.
  ****************************************************/

  if (strcmp(argv[1],"-runtestC")==0){
    Runtest("C",argc,argv);
  }

  /****************************************************
   ./openmx -maketestWF

    generates *.out files used to check whether OpenMX
    runs correctly for the generation of Wannier
    functions on various platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestWF")==0){
    Maketest("WF",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -runtestWF

   checks whether OpenMX runs correctly for the generation
   of Wannier functions on your platform by comparing the
   stored *.out files with the *.out files generated on
   your machine.
  ****************************************************/

  if (strcmp(argv[1],"-runtestWF")==0){
    Runtest("WF",argc,argv);
  }

  /****************************************************
   ./openmx -maketestNEGF

    generates *.out files used to check whether OpenMX
    runs correctly for NEGF calculations on various
    platforms.
  ****************************************************/

  if ( (argc==2 || argc==3) && strcmp(argv[1],"-maketestNEGF")==0){
    Maketest("NEGF",argc,argv);
    exit(1);
  }

  /****************************************************
   ./openmx -runtestNEGF

    checks whether OpenMX runs correctly for NEGF
    calculations on various platforms.
  ****************************************************/

  if (strcmp(argv[1],"-runtestNEGF")==0){
    Runtest("NEGF",argc,argv);
    MPI_Finalize();
    exit(1);
  }

  /*******************************************************
   check consistency between analytic and numerical forces
  *******************************************************/
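  /* Illustrative usage (a sketch; how the test inputs are chosen and what
     an optional extra argument means is determined by Force_test()):

       ./openmx -forcetest 2

     The integer (0-8) is stored below in force_flag and is presumably used
     to select which force contributions enter the consistency check. */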

  if ( (argc==3 || argc==4) && strcmp(argv[1],"-forcetest")==0){

    if      (strcmp(argv[2],"0")==0) force_flag = 0;
    else if (strcmp(argv[2],"1")==0) force_flag = 1;
    else if (strcmp(argv[2],"2")==0) force_flag = 2;
    else if (strcmp(argv[2],"3")==0) force_flag = 3;
    else if (strcmp(argv[2],"4")==0) force_flag = 4;
    else if (strcmp(argv[2],"5")==0) force_flag = 5;
    else if (strcmp(argv[2],"6")==0) force_flag = 6;
    else if (strcmp(argv[2],"7")==0) force_flag = 7;
    else if (strcmp(argv[2],"8")==0) force_flag = 8;
    else {
      printf("unsupported flag for -forcetest\n");
      exit(1);
    }

    Force_test(argc,argv);
    exit(1);
  }

  /*******************************************************
   check consistency between analytic and numerical stress
  *******************************************************/
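  /* Illustrative usage, analogous to -forcetest above:

       ./openmx -stresstest 2

     The integer (0-8) is stored below in stress_flag and passed on to
     Stress_test(). */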

  if ( (argc==3 || argc==4) && strcmp(argv[1],"-stresstest")==0){

    if      (strcmp(argv[2],"0")==0) stress_flag = 0;
    else if (strcmp(argv[2],"1")==0) stress_flag = 1;
    else if (strcmp(argv[2],"2")==0) stress_flag = 2;
    else if (strcmp(argv[2],"3")==0) stress_flag = 3;
    else if (strcmp(argv[2],"4")==0) stress_flag = 4;
    else if (strcmp(argv[2],"5")==0) stress_flag = 5;
    else if (strcmp(argv[2],"6")==0) stress_flag = 6;
    else if (strcmp(argv[2],"7")==0) stress_flag = 7;
    else if (strcmp(argv[2],"8")==0) stress_flag = 8;
    else {
      printf("unsupported flag for -stresstest\n");
      exit(1);
    }

    Stress_test(argc,argv);
    MPI_Finalize();
    exit(1);
  }

  /*******************************************************
    check whether an NEB calculation is requested, and
    if so, branch to the NEB routine.
  *******************************************************/

  if (neb_check(argv)) neb(argc,argv);

  /*******************************************************
   allocate CompTime and show the greeting message
  *******************************************************/

  CompTime = (double**)malloc(sizeof(double*)*numprocs);
  for (i=0; i<numprocs; i++){
    CompTime[i] = (double*)malloc(sizeof(double)*30);
    for (j=0; j<30; j++) CompTime[i][j] = 0.0;
  }

  if (myid==Host_ID){
    printf("\n*******************************************************\n");
    printf("*******************************************************\n");
    printf(" Welcome to OpenMX   Ver. %s                           \n",Version_OpenMX);
    printf(" Copyright (C), 2002-2014, T. Ozaki                    \n");
    printf(" OpenMX comes with ABSOLUTELY NO WARRANTY.             \n");
    printf(" This is free software, and you are welcome to         \n");
    printf(" redistribute it under the constitution of the GNU-GPL.\n");
    printf("*******************************************************\n");
    printf("*******************************************************\n\n");
  }

  Init_List_YOUSO();
  remake_headfile = 0;
  ScaleSize = 1.2;

  /****************************************************
                   Read the input file
  ****************************************************/

  init_alloc_first();

  CompTime[myid][1] = readfile(argv);

  MPI_Barrier(MPI_COMM_WORLD1);

  /* initialize the PrintMemory routine */

  sprintf(fileMemory,"%s%s.memory%i",filepath,filename,myid);
  PrintMemory(fileMemory,0,"init");
  PrintMemory_Fix();

  /* initialize */

  init();

  /* for DFTD-vdW by okuno */
  /* for version_dftD by Ellner */
  if(dftD_switch==1 && version_dftD==2) DFTDvdW_init();
  if(dftD_switch==1 && version_dftD==3) DFTD3vdW_init();

  /* check "-mltest2" mode */

  po = 0;
  if (myid==Host_ID){
    for (i=0; i<argc; i++){
      if ( strcmp(argv[i],"-mltest2")==0 ){
        po = 1;
        ip = i;
      }
    }
  }

  MPI_Bcast(&po, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);
  MPI_Bcast(&ip, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);

  if ( po==1 ) ML_flag = 1;
  else         ML_flag = 0;

  /* check "-forcetest2" mode */

  po = 0;
  if (myid==Host_ID){
    for (i=0; i<argc; i++){
      if ( strcmp(argv[i],"-forcetest2")==0 ){
        po = 1;
        ip = i;
      }
    }
  }

  MPI_Bcast(&po, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);
  MPI_Bcast(&ip, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);

  if ( po==1 ){
    force_flag = atoi(argv[ip+1]);
    ForceConsistency_flag = 1;
  }

  /* check the force consistency;
     the number of processes should be less than 2
     (i.e., run on a single process).
  */

  if (ForceConsistency_flag==1){

    Check_Force(argv);
    CompTime[myid][20] = OutData(argv[1]);
    Merge_LogFile(argv[1]);
    Free_Arrays(0);
    MPI_Finalize();
    exit(0);
    return 1;
  }

  /* check "-stresstest2" mode */

  po = 0;
  if (myid==Host_ID){
    for (i=0; i<argc; i++){
      if ( strcmp(argv[i],"-stresstest2")==0 ){
        po = 1;
        ip = i;
      }
    }
  }

  MPI_Bcast(&po, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);
  MPI_Bcast(&ip, 1, MPI_INT, Host_ID, MPI_COMM_WORLD1);

  if ( po==1 ){
    stress_flag = atoi(argv[ip+1]);
    StressConsistency_flag = 1;
  }

  /* check the stress consistency;
     the number of processes should be less than 2
     (i.e., run on a single process).
  */

  if (StressConsistency_flag==1){
    Check_Stress(argv);
    CompTime[myid][20] = OutData(argv[1]);
    Merge_LogFile(argv[1]);
    Free_Arrays(0);
    MPI_Finalize();
    exit(0);
    return 1;
  }

  /*******************************************************
        optimization of cell and internal coordinates
  *******************************************************/

  if (CellOpt_switch!=0) cellopt(argv, CompTime);

  /****************************************************
      SCF-DFT calculations, MD, and geometry
      optimization.
  ****************************************************/
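  /* Outline of the loop below: at each MD/optimization step, truncation()
     updates the structure-dependent data, DFT() carries out the SCF
     calculation, iterout() writes the per-step output, and MD_pac() updates
     the atomic positions (unless ML_flag is set). The loop terminates when
     MD_Opt_OK is set or MD_IterNumber is exceeded. */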

  MD_iter = 1;
  Temp_MD_iter = 1;

  do {

    if (MD_switch==12)
      CompTime[myid][2] += truncation(1,1);  /* EvsLC */
    else if (MD_cellopt_flag==1)
      CompTime[myid][2] += truncation(1,1);  /* cell optimization */
    else
      CompTime[myid][2] += truncation(MD_iter,1);

    if (ML_flag==1 && myid==Host_ID) Get_VSZ(MD_iter);

    if (Solver==4) {
      TRAN_Calc_GridBound( mpi_comm_level1, atomnum, WhatSpecies, Spe_Atom_Cut1,
                           Ngrid1, Grid_Origin, Gxyz, tv, gtv, rgtv, Left_tv, Right_tv );

      /* output: TRAN_region[], TRAN_grid_bound */
    }

    if (Solver!=4 || TRAN_SCF_skip==0){

      CompTime[myid][3] += DFT(MD_iter,(MD_iter-1)%orbitalOpt_per_MDIter+1);

      iterout(MD_iter+MD_Current_Iter,MD_TimeStep*(MD_iter+MD_Current_Iter-1),filepath,filename);

      /* MD or geometry optimization */
      if (ML_flag==0) CompTime[myid][4] += MD_pac(MD_iter,argv[1]);
    }

    MD_iter++;
    Temp_MD_iter++;

  } while(MD_Opt_OK==0 && (MD_iter+MD_Current_Iter)<=MD_IterNumber);

  if ( TRAN_output_hks ) {
     /* "left" is a dummy argument */
     TRAN_RestartFile(mpi_comm_level1, "write","left",filepath,TRAN_hksoutfilename);
  }

  /****************************************************
             calculate the Voronoi charge
  ****************************************************/

  if (Voronoi_Charge_flag==1) Voronoi_Charge();

  /****************************************************
        calculate the Voronoi orbital magnetic moment
  ****************************************************/

  if (Voronoi_OrbM_flag==1) Voronoi_Orbital_Moment();

  /****************************************************
        output the analysis of decomposed energies
  ****************************************************/

  if (Energy_Decomposition_flag==1) Output_Energy_Decomposition();

  /****************************************************
  generate a *.frac file containing the fractional coordinates
  ****************************************************/

  Make_FracCoord(argv[1]);

  /****************************************************
   generation of Wannier functions (added by Hongming Weng)
  ****************************************************/

  /* hmweng */
  if(Wannier_Func_Calc){
    if (myid==Host_ID) printf("Calling Generate_Wannier...\n");fflush(0);

    Generate_Wannier(argv[1]);
  }

  /*********************************************************
     Electronic transport calculations based on NEGF:
     transmission, current, eigenchannel analysis, and
     real-space analysis of the current
  *********************************************************/

  if (Solver==4 && TRAN_analysis==1) {

    /* if the SCF is skipped, calculate the values of the basis functions on each grid point */
    if (1<=TRAN_SCF_skip) i = Set_Orbitals_Grid(0);

    if (SpinP_switch==3) {
      TRAN_Main_Analysis_NC( mpi_comm_level1, argc, argv, Matomnum, M2G,
                             GridN_Atom, GridListAtom, CellListAtom,
                             Orbs_Grid, TNumGrid );
    }
    else {
      TRAN_Main_Analysis( mpi_comm_level1, argc, argv, Matomnum, M2G,
                          GridN_Atom, GridListAtom, CellListAtom,
                          Orbs_Grid, TNumGrid );
    }
  }

  /****************************************************
                generate the output files
  ****************************************************/

  if (OutData_bin_flag)
    CompTime[myid][20] = OutData_Binary(argv[1]);
  else
    CompTime[myid][20] = OutData(argv[1]);

  /****************************************************
    write the connectivity, Hamiltonian, overlap, and
    density matrices, etc. to the file "filename.scfout"
  ****************************************************/

  if (HS_fileout==1) SCF2File("write",argv[1]);

  /* elapsed time */

  dtime(&TEtime);
  CompTime[myid][0] = TEtime - TStime;
  Output_CompTime();
  for (i=0; i<numprocs; i++){
    free(CompTime[i]);
  }
  free(CompTime);

  /* merge log files */
  Merge_LogFile(argv[1]);

  /* free arrays for NEGF */

  if (Solver==4){

    TRAN_Deallocate_Atoms();
    TRAN_Deallocate_RestartFile("left");
    TRAN_Deallocate_RestartFile("right");
  }

  /* free arrays */

  Free_Arrays(0);

  /* print memory */

  PrintMemory("total",0,"sum");

  MPI_Barrier(MPI_COMM_WORLD);
  if (myid==Host_ID){
    printf("\nThe calculation was normally finished.\n");
  }

  MPI_Finalize();

  return 0;
}